/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
#ifdef CONFIG_ARC_HAS_LLSC

/*
 * A normal LLOCK/SCOND based system, w/o need for livelock workaround
 */
#ifndef CONFIG_ARC_STAR_9000923308
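/*
 * Rough C equivalent of the LLOCK/SCOND acquire sequence used below (a
 * sketch for illustration only, not what the compiler emits;
 * store_conditional() is just a stand-in for the scond instruction):
 * spin while the lock word reads LOCKED, then attempt the conditional
 * store, and start over if another core raced us between load and store.
 *
 *	do {
 *		while (lock->slock == __ARCH_SPIN_LOCK_LOCKED__)
 *			;	// llock + breq: spin while LOCKED
 *	} while (!store_conditional(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__));
 */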
static inline void arch_spin_lock(arch_spinlock_t *lock)
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
	unsigned int val, got_it = 0;

	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	mov	%[got_it], 1		\n"
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
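/*
 * Hypothetical usage sketch of the primitives above (callers normally go
 * through the generic spin_lock()/spin_trylock()/spin_unlock() wrappers;
 * this only illustrates the contract):
 *
 *	if (arch_spin_trylock(&l))	// 1 - lock taken successfully
 *		arch_spin_unlock(&l);
 *	else
 *		arch_spin_lock(&l);	// blocking acquire
 */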
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */
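/*
 * Counter encoding assumed by the code below (values come from
 * asm/spinlock_types.h; summarised here only for illustration):
 *
 *	counter == __ARCH_RW_LOCK_UNLOCKED__	lock is free
 *	0 < counter < __ARCH_RW_LOCK_UNLOCKED__	held by one or more readers
 *						(each reader decrements it)
 *	counter == 0 (the WR_LOCKED value)	held exclusively by a writer
 */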
static inline void arch_read_lock(arch_rwlock_t *rw)
	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *	}
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	: [rwlock]	"r"	(&(rw->counter)),
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"4: ; --- done ---			\n"
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
static inline void arch_write_lock(arch_rwlock_t *rw)
	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *	}
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
	unsigned int val, got_it = 0;

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"4: ; --- done ---			\n"
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
static inline void arch_read_unlock(arch_rwlock_t *rw)
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	: [rwlock]	"r"	(&(rw->counter))

static inline void arch_write_unlock(arch_rwlock_t *rw)
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
#else	/* CONFIG_ARC_STAR_9000923308 */

/*
 * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
 * coherency transactions in the SCU. The exclusive line state keeps rotating
 * among contending cores, leading to a never-ending cycle. So break the cycle
 * by deferring the retry of a failed exclusive access (SCOND). The actual delay
 * needed is a function of the number of contending cores as well as the
 * unrelated coherency traffic from other cores. To keep the code simple, start
 * off with a small delay of 1, which suffices for most cases, and double the
 * delay on contention. Eventually the delay is large enough for the coherency
 * pipeline to drain, so a subsequent exclusive access will succeed.
 */
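/*
 * Rough C sketch of the backoff scheme implemented by the asm macros below
 * (illustration only; try_exclusive_access() is a stand-in for the
 * LLOCK/SCOND sequence of each locking function):
 *
 *	delay = 1;
 *	for (;;) {
 *		if (try_exclusive_access())
 *			break;			// scond succeeded, done
 *		for (tmp = delay; tmp; tmp--)
 *			;			// busy-wait 'delay' iterations
 *		delay *= 2;			// exponential backoff
 *		if (delay == 0)			// handle overflow
 *			delay = 1;
 *	}
 */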
#define SCOND_FAIL_RETRY_VAR_DEF						\
	unsigned int delay, tmp;						\

#define SCOND_FAIL_RETRY_ASM							\
	"	; --- scond fail delay ---	\n"				\
	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
	"	asl.f	%[delay], %[delay], 1	\n"	/* delay *= 2 */	\
	"	mov.z	%[delay], 1		\n"	/* handle overflow */	\
	"	b	1b			\n"	/* start over */	\
	"4: ; --- done ---			\n"				\

#define SCOND_FAIL_RETRY_VARS							\
	,[delay] "=&r" (delay), [tmp] "=&r" (tmp)				\
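/*
 * How these pieces are meant to compose in the functions below (sketch):
 * SCOND_FAIL_RETRY_VAR_DEF declares the delay/tmp temporaries,
 * SCOND_FAIL_RETRY_ASM is spliced in after the scond of each LLOCK/SCOND
 * loop so that a failed scond falls into the delay code and then branches
 * back to label 1, and SCOND_FAIL_RETRY_VARS appends the two temporaries
 * to the asm operand list.
 */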
static inline void arch_spin_lock(arch_spinlock_t *lock)
	SCOND_FAIL_RETRY_VAR_DEF;

	__asm__ __volatile__(
	"0:	mov	%[delay], 1		\n"
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bz	4f			\n"	/* done */
	  SCOND_FAIL_RETRY_VARS
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
	unsigned int val, got_it = 0;
	SCOND_FAIL_RETRY_VAR_DEF;

	__asm__ __volatile__(
	"0:	mov	%[delay], 1		\n"
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	mov.z	%[got_it], 1		\n"	/* got it */
	  [got_it]	"+&r"	(got_it)
	  SCOND_FAIL_RETRY_VARS
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)

static inline void arch_spin_unlock(arch_spinlock_t *lock)
	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
	SCOND_FAIL_RETRY_VAR_DEF;

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *	}
	 */
	__asm__ __volatile__(
	"0:	mov	%[delay], 1		\n"
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bz	4f			\n"	/* done */
	  SCOND_FAIL_RETRY_VARS
	: [rwlock]	"r"	(&(rw->counter)),
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
	unsigned int val, got_it = 0;
	SCOND_FAIL_RETRY_VAR_DEF;

	__asm__ __volatile__(
	"0:	mov	%[delay], 1		\n"
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	mov.z	%[got_it], 1		\n"	/* got it */
	  [got_it]	"+&r"	(got_it)
	  SCOND_FAIL_RETRY_VARS
	: [rwlock]	"r"	(&(rw->counter)),
static inline void arch_write_lock(arch_rwlock_t *rw)
	SCOND_FAIL_RETRY_VAR_DEF;

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *	}
	 */
	__asm__ __volatile__(
	"0:	mov	%[delay], 1		\n"
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	  SCOND_FAIL_RETRY_VARS
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
	unsigned int val, got_it = 0;
	SCOND_FAIL_RETRY_VAR_DEF;

	__asm__ __volatile__(
	"0:	mov	%[delay], 1		\n"
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	mov.z	%[got_it], 1		\n"	/* got it */
	  [got_it]	"+&r"	(got_it)
	  SCOND_FAIL_RETRY_VARS
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
static inline void arch_read_unlock(arch_rwlock_t *rw)
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	: [rwlock]	"r"	(&(rw->counter))

static inline void arch_write_unlock(arch_rwlock_t *rw)
	/*
	 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	scond	%[UNLOCKED], [%[rwlock]]\n"
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"r"	(__ARCH_RW_LOCK_UNLOCKED__)

#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS

#endif	/* CONFIG_ARC_STAR_9000923308 */
#else	/* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous, we only need the one
	 * after the lock for providing the ACQUIRE semantics.
	 * However doing the "right" thing was regressing hackbench
	 * so keeping this, pending further investigation
	 */

	__asm__ __volatile__(
	"	breq	%0, %2, 1b	\n"
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)

	/*
	 * ACQUIRE barrier to ensure load/store after taking the lock
	 * don't "bleed-up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barrier,
	 * thus we need the full all-all barrier
	 */
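/*
 * Rough C equivalent of the EX based acquire above (a sketch for
 * illustration only; atomic_exchange() is a stand-in for the "ex"
 * instruction): keep atomically swapping LOCKED into the lock word until
 * the value swapped out shows the lock was previously free.
 *
 *	do {
 *		val = __ARCH_SPIN_LOCK_LOCKED__;
 *		atomic_exchange(&val, &lock->slock);
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__);
 */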
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	__asm__ __volatile__(
	: "r"(&(lock->slock))

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);

static inline void arch_spin_unlock(arch_spinlock_t *lock)
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
	 */

	__asm__ __volatile__(
	: "r"(&(lock->slock))

	/*
	 * superfluous, but keeping for now - see pairing version in
	 * arch_spin_lock above
	 */
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
static inline void arch_read_lock(arch_rwlock_t *rw)
	while (!arch_read_trylock(rw))
		cpu_relax();

static inline void arch_write_lock(arch_rwlock_t *rw)
	while (!arch_write_trylock(rw))
		cpu_relax();

static inline void arch_read_unlock(arch_rwlock_t *rw)
	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

static inline void arch_write_unlock(arch_rwlock_t *rw)
	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
#endif	/* CONFIG_ARC_HAS_LLSC */

#define arch_read_can_lock(x)	((x)->counter > 0)
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */