#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

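/*
 * Illustrative sketch (not part of the original header): a
 * movli.l/movco.l pair implements load-linked/store-conditional.
 * The loop each macro below emits behaves roughly like:
 *
 *	do {
 *		tmp = v->counter;	! movli.l: load-linked into r0
 *		tmp op= i;		! the operation, performed in r0
 *	} while (store failed);		! movco.l sets T on success, bf retries
 *
 * Since both movli.l and movco.l must use r0, tmp already holds the
 * new value when the loop exits, which is why atomic_xxx_return()
 * costs nothing extra.
 */
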
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "\n"			\
"	" #op "	%1, %0				\n"			\
"	movco.l	%0, @%2				\n"			\
"	bf	1b				\n"			\
	: "=&z" (tmp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long temp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "_return	\n"		\
"	" #op "	%1, %0					\n"		\
"	movco.l	%0, @%2					\n"		\
"	bf	1b					\n"		\
"	synco						\n"		\
	: "=&z" (temp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long res, temp;					\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%3, %0		! atomic_fetch_" #op "	\n"		\
"	mov %0, %1					\n"		\
"	" #op "	%2, %0					\n"		\
"	movco.l	%0, @%3					\n"		\
"	bf	1b					\n"		\
"	synco						\n"		\
	: "=&z" (temp), "=&r" (res)					\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
									\
	return res;							\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op)	ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
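
/*
 * Usage sketch (illustrative, not part of this header): the expansions
 * above provide, e.g.
 *
 *	atomic_t cnt = ATOMIC_INIT(0);
 *	int old;
 *
 *	atomic_add(2, &cnt);			! cnt == 2
 *	old = atomic_fetch_or(4, &cnt);		! old == 2, cnt == 6
 *
 * Note that and/or/xor get only the plain and fetch variants here,
 * matching the kernel atomic API, which defines no
 * atomic_{and,or,xor}_return().
 */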

#endif /* __ASM_SH_ATOMIC_LLSC_H */