/*
 * include/asm-xtensa/atomic.h
 *
 * Atomic operations that C can't guarantee us.  Useful for resource counting.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 */
#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H

#include <linux/stringify.h>
#include <linux/types.h>

#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)	{ (i) }
/*
 * This Xtensa implementation assumes that the right mechanism
 * for exclusion is for locking interrupts to level EXCM_LEVEL.
 *
 * Locking interrupts looks like this:
 *
 *    rsil a15, LOCKLEVEL
 *    <code>
 *    wsr  a15, PS
 *    rsync
 *
 * Note that a15 is used here because the register allocation
 * done by the compiler is not guaranteed and a window overflow
 * may not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */
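
/*
 * On cores that provide the S32C1I conditional-store instruction
 * (XCHAL_HAVE_S32C1I), the operations below avoid locking interrupts
 * altogether and instead retry a compare-and-swap loop of the form
 * used throughout this file:
 *
 *    1:  l32i    %1, %3, 0       # load the old value
 *        wsr     %1, scompare1   # s32c1i compares against scompare1
 *        <op>    %0, %1, %2      # compute the new value
 *        s32c1i  %0, %3, 0       # store only if *v still holds the old value
 *        bne     %0, %1, 1b      # another CPU intervened: reload and retry
 */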
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		(*(volatile int *)&(v)->counter)
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		((v)->counter = (i))
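
/*
 * Illustrative use of the basic accessors (the caller code here is
 * hypothetical, not part of this header):
 *
 *	static atomic_t active_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&active_users, 5);
 *	if (atomic_read(&active_users) > 0)
 *		...
 */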
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t * v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       add     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       add     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);
#endif
}
/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       sub     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       sub     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);
#endif
}
/*
 * We use atomic_{add|sub}_return to define other functions.
 */
static inline int atomic_add_return(int i, atomic_t * v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       add     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			"       add     %0, %0, %2\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);

	return result;
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       add     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);

	return vval;
#endif
}
static inline int atomic_sub_return(int i, atomic_t * v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       sub     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			"       sub     %0, %0, %2\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);

	return result;
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       sub     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);

	return vval;
#endif
}
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1,(v))
/**
 * atomic_inc_return - increment atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the new value.
 */
#define atomic_inc_return(v) atomic_add_return(1,(v))
/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1,(v))
/**
 * atomic_dec_return - decrement atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the new value.
 */
#define atomic_dec_return(v) atomic_sub_return(1,(v))
/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)
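
/*
 * Typical reference-count release pattern built on atomic_dec_and_test
 * (illustrative only; obj and release_obj are hypothetical names):
 *
 *	if (atomic_dec_and_test(&obj->refcount))
 *		release_obj(obj);	// last reference dropped
 */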
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)
/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
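
/*
 * atomic_xchg returns the previous value, so it can implement a simple
 * test-and-set flag (illustrative sketch only; the caller code here is
 * not part of this header):
 *
 *	static atomic_t busy = ATOMIC_INIT(0);
 *
 *	while (atomic_xchg(&busy, 1) != 0)
 *		cpu_relax();		// someone else holds the flag
 *	...critical section...
 *	atomic_set(&busy, 0);
 */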
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
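
/*
 * Common use: take a reference only if the object is still live, i.e.
 * "increment unless zero" (illustrative only; obj is a hypothetical name):
 *
 *	if (__atomic_add_unless(&obj->refcount, 1, 0) == 0)
 *		return NULL;	// refcount was 0: object is being freed
 */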
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       and     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (~mask), "a" (v)
			: "memory"
			);
#else
	unsigned int all_f = -1;
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       xor     %1, %4, %3\n"
			"       and     %0, %0, %4\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval), "=a" (mask)
			: "a" (v), "a" (all_f), "1" (mask)
			: "a15", "memory"
			);
#endif
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       or      %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (mask), "a" (v)
			: "memory"
			);
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       or      %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (mask), "a" (v)
			: "a15", "memory"
			);
#endif
}
#endif /* __KERNEL__ */

#endif /* _XTENSA_ATOMIC_H */