/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)	READ_ONCE((v)->counter)
#define ATOMIC_INIT(i)	{ (i) }
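
/*
 * Illustrative usage of the two helpers above (hypothetical function name,
 * not part of the original header): ATOMIC_INIT() for build-time
 * initialisation, atomic_read() for a lockless load of the current value.
 */
static inline int example_counter_is_zero(atomic_t *v)
{
	/* e.g. for a counter declared as: static atomic_t cnt = ATOMIC_INIT(0); */
	return atomic_read(v) == 0;
}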

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}
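
/*
 * Purely illustrative C-level sketch of the LLOCK/SCOND retry loop generated
 * above (hypothetical helper built on a GCC builtin; the kernel relies on the
 * inline asm, not on this):
 */
static inline void example_llsc_add_sketch(int i, atomic_t *v)
{
	int old, newval;

	do {
		old = READ_ONCE(v->counter);	/* llock: load-exclusive   */
		newval = old + i;		/* the "asm_op", here: add */
	} while (!__atomic_compare_exchange_n(&v->counter, &old, newval,
					      false, __ATOMIC_RELAXED,
					      __ATOMIC_RELAXED));
					/* failed scond -> bnz 1b retries */
}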

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r" (orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}
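
/*
 * The fetch_##op flavour above returns the *old* value while storing the new
 * one. Illustrative equivalent (hypothetical helper) using a GCC builtin,
 * with the surrounding smp_mb() pair approximated by SEQ_CST ordering:
 */
static inline int example_fetch_add_sketch(int i, atomic_t *v)
{
	return __atomic_fetch_add(&v->counter, i, __ATOMIC_SEQ_CST);
}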

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
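
/*
 * The two instantiations above expand ATOMIC_OP, ATOMIC_OP_RETURN and
 * ATOMIC_FETCH_OP, i.e. they generate:
 *   atomic_add(),  atomic_add_return(),  atomic_fetch_add()
 *   atomic_sub(),  atomic_sub_return(),  atomic_fetch_sub()
 */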

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
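
/*
 * Illustrative use of one of the bitwise ops just generated (hypothetical
 * helper): clear the bits in @mask with a single atomic read-modify-write.
 */
static inline void example_clear_flags(atomic_t *flags, int mask)
{
	atomic_andnot(mask, flags);
}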

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld  %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st  %0, [%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return temp;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v))

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
#define atomic_fetch_andnot(mask, v) atomic_fetch_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})
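
/*
 * Illustrative caller (hypothetical helper): take a reference only if the
 * count has not already dropped to zero. __atomic_add_unless() returns the
 * old value, so a non-zero result means the increment actually happened.
 */
static inline int example_get_unless_zero(atomic_t *refcnt)
{
	return __atomic_add_unless(refcnt, 1, 0);
}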

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)
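
/*
 * Illustrative refcounting pattern built on the derived helpers above
 * (hypothetical helpers, not part of the original header):
 */
static inline void example_ref_get(atomic_t *refcnt)
{
	atomic_inc(refcnt);
}

static inline int example_ref_put(atomic_t *refcnt)
{
	/* non-zero only for the caller that dropped the last reference */
	return atomic_dec_and_test(refcnt);
}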

#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else	/* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 *  - There are 2 semantics involved here:
 *    = exclusive implies no interim update between load/store to same addr
 *    = both words are observed/updated together: this is guaranteed even
 *      for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
 *      is NOT required to use LLOCKD+SCONDD, STD suffices
 */

typedef struct {
	aligned_u64 counter;
} atomic64_t;

#define ATOMIC64_INIT(a) { (a) }

static inline long long atomic64_read(const atomic64_t *v)
{
	unsigned long long val;

	__asm__ __volatile__(
	"	ldd   %0, [%1]	\n"
	: "=r"(val)
	: "r"(&v->counter));

	return val;
}

static inline void atomic64_set(atomic64_t *v, long long a)
{
	/*
	 * This could have been a simple assignment in "C" but would need
	 * explicit volatile. Otherwise gcc optimizers could elide the store
	 * which borked atomic64 self-test
	 * In the inline asm version, memory clobber needed for exact same
	 * reason, to tell gcc about the store.
	 *
	 * This however is not needed for sibling atomic64_add() etc since both
	 * load/store are explicitly done in inline asm. As long as API is used
	 * for each access, gcc has no way to optimize away any load/store
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long a, atomic64_t *v)		\
{									\
	unsigned long long val;						\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long atomic64_##op##_return(long long a, atomic64_t *v)	\
{									\
	unsigned long long val;						\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline long long atomic64_fetch_##op(long long a, atomic64_t *v)	\
{									\
	unsigned long long val, orig;					\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd   %1, [%2]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return orig;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)
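
/*
 * The instantiations above generate atomic64_add(), atomic64_add_return(),
 * atomic64_fetch_add() and friends. Illustrative use (hypothetical helper):
 * accumulate a byte count from several CPUs without a lock.
 */
static inline void example_account_bytes(atomic64_t *stat, long long bytes)
{
	atomic64_add(bytes, stat);
}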

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
{
	long long prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
	long long prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(val)
	: "r"(&v->counter)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return val;
}

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * if (v != u) { v += a; ret = 1} else {ret = 0}
 * Returns 1 iff @v was not @u (i.e. if add actually happened)
 */
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	int op_done;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	mov	%1, 1		\n"
	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
	"	mov	%1, 0		\n"
	"2:				\n"
	"	add.f   %L0, %L0, %L3	\n"
	"	adc     %H0, %H0, %H3	\n"
	"	scondd  %0, [%2]	\n"
	"	bnz     1b		\n"
	"3:				\n"
	: "=&r"(val), "=&r" (op_done)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return op_done;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
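
/*
 * Illustrative use of the derived 64-bit helpers above (hypothetical helper):
 * a monotonically increasing 64-bit sequence number shared between CPUs.
 */
static inline long long example_next_seq(atomic64_t *seq)
{
	return atomic64_inc_return(seq);
}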

#endif	/* !CONFIG_GENERIC_ATOMIC64 */

#endif	/* !__ASSEMBLY__ */

#endif	/* _ASM_ARC_ATOMIC_H */