/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/war.h>	/* for R10000_LLSC_WAR, used below */

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 */

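/*
 * For illustration only, not part of this header: a minimal sketch of the
 * same ticket-lock idea in portable C, using GCC's __atomic builtins and a
 * hypothetical two-halves layout. The real implementation below packs both
 * halves into a single 32-bit word so that ll/sc can update them together.
 *
 *	struct sketch_ticket_lock {		// hypothetical type
 *		unsigned short head;	// "serving_now": whose turn it is
 *		unsigned short tail;	// "ticket": next number handed out
 *	};
 *
 *	static void sketch_lock(struct sketch_ticket_lock *l)
 *	{
 *		// take a ticket: atomically fetch and bump the tail
 *		unsigned short me =
 *			__atomic_fetch_add(&l->tail, 1, __ATOMIC_RELAXED);
 *		// spin until the head reaches our ticket number
 *		while (__atomic_load_n(&l->head, __ATOMIC_ACQUIRE) != me)
 *			;
 *	}
 *
 *	static void sketch_unlock(struct sketch_ticket_lock *l)
 *	{
 *		// hand the lock to the next queued ticket
 *		__atomic_store_n(&l->head, l->head + 1, __ATOMIC_RELEASE);
 *	}
 */
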
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	u32 counters = ACCESS_ONCE(lock->lock);

	return ((counters >> 16) ^ counters) & 0xffff;
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	while (arch_spin_is_locked(x)) { cpu_relax(); }

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	u32 counters = ACCESS_ONCE(lock->lock);

	return (((counters >> 16) - counters) & 0xffff) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

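/*
 * Worked example (illustrative): if lock->lock == 0x00070005, the next
 * ticket to hand out is 7 and serving_now is 5, so the lock is held
 * ((7 ^ 5) & 0xffff != 0) and contended ((7 - 5) & 0xffff > 1: the holder
 * plus at least one waiter).
 */
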
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	int my_ticket;
	int tmp;
	int inc = 0x10000;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqzl	%[my_ticket], 1b			\n"
		"	 nop						\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqz	%[my_ticket], 1b			\n"
		"	 srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int serving_now = lock->h.serving_now + 1;
	wmb();
	lock->h.serving_now = (u16)serving_now;
	nudge_writes();
}

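/*
 * Typical usage, for illustration only; callers normally reach these
 * through the generic spin_lock()/spin_unlock() wrappers rather than the
 * arch_ entry points directly:
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);
 *	...			// critical section
 *	arch_spin_unlock(&lock);
 */
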
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;
	int inc = 0x10000;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();

	return tmp;
}

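/*
 * Illustrative use of the return value (1 on success, 0 if the lock was
 * busy), e.g. from the generic spin_trylock() path:
 *
 *	if (arch_spin_trylock(&lock)) {
 *		...		// got the lock
 *		arch_spin_unlock(&lock);
 *	}
 */
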
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

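/*
 * Illustrative mixing pattern (generic rwlock API shown; my_rwlock and the
 * interrupt handler are hypothetical):
 *
 *	unsigned long flags;
 *
 *	// process context: writers must disable interrupts
 *	write_lock_irqsave(&my_rwlock, flags);
 *	...
 *	write_unlock_irqrestore(&my_rwlock, flags);
 *
 *	// interrupt handler: readers may use the non-irqsafe variant
 *	read_lock(&my_rwlock);
 *	...
 *	read_unlock(&my_rwlock);
 */
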
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(rw)	((rw)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(rw) (!(rw)->lock)

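/*
 * Encoding note (inferred from the tests above): rw->lock holds the count
 * of active readers; a writer sets the sign bit, so a negative value means
 * write-locked and zero means free.
 */
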
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF12_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_lock	\n"
			"	bltz	%1, 1b				\n"
			"	 addu	%1, 1				\n"
			"	sc	%1, %0				\n"
			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF12_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	smp_llsc_mb();
}

/*
 * Note the use of sub, not subu, which will make the kernel die with an
 * overflow exception if we ever try to unlock an rwlock that is already
 * unlocked or is being held by a writer (e.g. a stray read_unlock while a
 * writer holds the lock computes 0x80000000 - 1, a signed overflow).
 */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	smp_mb__before_llsc();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# arch_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF12_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_unlock	\n"
			"	sub	%1, 1				\n"
			"	sc	%1, %0				\n"
			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF12_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF12_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_write_lock	\n"
			"	bnez	%1, 1b				\n"
			"	 lui	%1, 0x8000			\n"
			"	sc	%1, %0				\n"
			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF12_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	smp_llsc_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb__before_llsc();

	__asm__ __volatile__(
	"				# arch_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF12_ASM() (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF12_ASM() (rw->lock)
		: "memory");
	}

	return ret;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF12_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"	ll	%1, %3	# arch_write_trylock	\n"
			"	li	%2, 0				\n"
			"	bnez	%1, 2f				\n"
			"	lui	%1, 0x8000			\n"
			"	sc	%1, %0				\n"
			"	li	%2, 1				\n"
			"2:						\n"
			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp),
			  "=&r" (ret)
			: GCC_OFF12_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));

		smp_llsc_mb();
	}

	return ret;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_SPINLOCK_H */