arch/arc/include/asm/atomic.h
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)  READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#ifdef CONFIG_ARC_STAR_9000923308

#define SCOND_FAIL_RETRY_VAR_DEF                                                \
        unsigned int delay = 1, tmp;                                            \

#define SCOND_FAIL_RETRY_ASM                                                    \
        "       bz      4f                      \n"                             \
        "   ; --- scond fail delay ---          \n"                             \
        "       mov     %[tmp], %[delay]        \n"     /* tmp = delay */       \
        "2:     brne.d  %[tmp], 0, 2b           \n"     /* while (tmp != 0) */  \
        "       sub     %[tmp], %[tmp], 1       \n"     /* tmp-- */             \
        "       rol     %[delay], %[delay]      \n"     /* delay *= 2 */        \
        "       b       1b                      \n"     /* start over */        \
        "4: ; --- success ---                   \n"                             \

#define SCOND_FAIL_RETRY_VARS                                                   \
          ,[delay] "+&r" (delay),[tmp] "=&r"    (tmp)                           \

#else   /* !CONFIG_ARC_STAR_9000923308 */

#define SCOND_FAIL_RETRY_VAR_DEF

#define SCOND_FAIL_RETRY_ASM                                                    \
        "       bnz     1b                      \n"                             \

#define SCOND_FAIL_RETRY_VARS

#endif

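/*
 * How the workaround above is intended to behave (a sketch, as inferred
 * from the inline asm comments): on parts affected by STAR 9000923308 a
 * failed SCOND falls through the BZ into the delay loop, busy-waits for
 * 'delay' iterations, doubles 'delay' via ROL (rotate-left of a power of
 * two == multiply by 2) and branches back to the LLOCK at label 1, giving
 * an exponential backoff between retries. Unaffected parts simply retry
 * right away with "bnz 1b".
 */
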
#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned int val;                                               \
        SCOND_FAIL_RETRY_VAR_DEF                                        \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock   %[val], [%[ctr]]                \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
        "                                               \n"             \
        SCOND_FAIL_RETRY_ASM                                            \
                                                                        \
        : [val] "=&r"   (val) /* Early clobber to prevent reg reuse */  \
          SCOND_FAIL_RETRY_VARS                                         \
        : [ctr] "r"     (&v->counter), /* Not "m": llock only supports reg direct addr mode */  \
          [i]   "ir"    (i)                                             \
        : "cc");                                                        \
}                                                                       \

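/*
 * For illustration only (a sketch of the macro expansion, not extra code
 * in this header): without the erratum workaround, ATOMIC_OP(add, +=, add)
 * expands to roughly
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		__asm__ __volatile__(
 *		"1:	llock	%[val], [%[ctr]]	\n"
 *		"	add	%[val], %[val], %[i]	\n"
 *		"	scond	%[val], [%[ctr]]	\n"
 *		"	bnz	1b			\n"
 *		: [val] "=&r" (val)
 *		: [ctr] "r" (&v->counter), [i] "ir" (i)
 *		: "cc");
 *	}
 *
 * i.e. load-locked, apply the op, store-conditional, and retry until the
 * SCOND succeeds.
 */
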
#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned int val;                                               \
        SCOND_FAIL_RETRY_VAR_DEF                                        \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
         * LLOCK/SCOND themselves don't provide any such semantics      \
         */                                                             \
        smp_mb();                                                       \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock   %[val], [%[ctr]]                \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
        "                                               \n"             \
        SCOND_FAIL_RETRY_ASM                                            \
                                                                        \
        : [val] "=&r"   (val)                                           \
          SCOND_FAIL_RETRY_VARS                                         \
        : [ctr] "r"     (&v->counter),                                  \
          [i]   "ir"    (i)                                             \
        : "cc");                                                        \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        return val;                                                     \
}

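/*
 * Note the ordering contract (per the barrier comment above): unlike the
 * void ops, the _return variants are fully ordered, with an smp_mb() on
 * both sides of the LLOCK/SCOND sequence, and they hand back the *new*
 * value. The atomic_inc_return()/atomic_dec_return() wrappers at the
 * bottom of this file rely on exactly that.
 */
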
#else   /* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating the atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
        /*
         * Independent of hardware support, all of the atomic_xxx() APIs need
         * to follow the same locking rules to make sure that a "hardware"
         * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
         * sequence.
         *
         * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
         * requires the locking.
         */
        unsigned long flags;

        atomic_ops_lock(flags);
        WRITE_ONCE(v->counter, i);
        atomic_ops_unlock(flags);
}

#endif

/*
 * Non-hardware-assisted atomic R-M-W
 * Locking here is irq-disabling only (UP) or spinlock based (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        atomic_ops_lock(flags);                                         \
        v->counter c_op i;                                              \
        atomic_ops_unlock(flags);                                       \
}

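/*
 * A sketch of the expansion, for illustration: ATOMIC_OP(or, |=, or)
 * becomes roughly
 *
 *	static inline void atomic_or(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		atomic_ops_lock(flags);
 *		v->counter |= i;
 *		atomic_ops_unlock(flags);
 *	}
 *
 * with atomic_ops_lock()/atomic_ops_unlock() (from <asm/smp.h>) expected
 * to disable IRQs on UP and take a spinlock on SMP, per the comment above.
 */
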
#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned long flags;                                            \
        unsigned long temp;                                             \
                                                                        \
        /*                                                              \
         * spin lock/unlock provides the needed smp_mb() before/after   \
         */                                                             \
        atomic_ops_lock(flags);                                         \
        temp = v->counter;                                              \
        temp c_op i;                                                    \
        v->counter = temp;                                              \
        atomic_ops_unlock(flags);                                       \
                                                                        \
        return temp;                                                    \
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)

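/*
 * Summary of what the instantiations above generate: atomic_add() and
 * atomic_sub() with their _return variants, plus void-only atomic_and(),
 * atomic_andnot() (mapped to BIC, i.e. and-not, in the LLSC version),
 * atomic_or() and atomic_xor(). The "#define atomic_andnot atomic_andnot"
 * above is the usual marker telling generic code that this architecture
 * supplies its own andnot.
 */
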
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
        int temp;

        __asm__ __volatile__(
        "       ld.di %0, [%1]"
        : "=r"(temp)
        : "r"(&v->counter)
        : "memory");
        return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
        __asm__ __volatile__(
        "       st.di %0,[%1]"
        :
        : "r"(i), "r"(&v->counter)
        : "memory");
}

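/*
 * A reading of the two helpers above (hedged, based on the ARC ".di"
 * instruction flag): LD.DI/ST.DI are cache-bypassing (direct-to-memory)
 * accesses, so atomic_read()/atomic_set() on EZNPS should always hit the
 * shared copy of the counter rather than a possibly stale cached line.
 */
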
#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        __asm__ __volatile__(                                           \
        "       mov r2, %0\n"                                           \
        "       mov r3, %1\n"                                           \
        "       .word %2\n"                                             \
        :                                                               \
        : "r"(i), "r"(&v->counter), "i"(asm_op)                         \
        : "r2", "r3", "memory");                                        \
}                                                                       \

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned int temp = i;                                          \
                                                                        \
        /* Explicit full memory barrier needed before/after */          \
        smp_mb();                                                       \
                                                                        \
        __asm__ __volatile__(                                           \
        "       mov r2, %0\n"                                           \
        "       mov r3, %1\n"                                           \
        "       .word %2\n"                                             \
        "       mov %0, r2"                                             \
        : "+r"(temp)                                                    \
        : "r"(&v->counter), "i"(asm_op)                                 \
        : "r2", "r3", "memory");                                        \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        temp c_op i;                                                    \
                                                                        \
        return temp;                                                    \
}

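/*
 * How the _return variant above appears to work (a sketch inferred from
 * the code, as the CTOP opcodes are NPS-specific): the custom instruction
 * encoded via ".word" takes its operands in r2 (the operand) and r3 (the
 * counter address) and leaves the *old* counter value back in r2; the
 * trailing "temp c_op i" in C then recomputes the post-op value that
 * atomic_##op##_return() must return.
 */
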
#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))

ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)                                    \
({                                                                      \
        int c, old;                                                     \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
         * LLOCK/SCOND themselves don't provide any such semantics      \
         */                                                             \
        smp_mb();                                                       \
                                                                        \
        c = atomic_read(v);                                             \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
                c = old;                                                \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        c;                                                              \
})

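/*
 * Usage sketch: atomic_inc_not_zero() below is the typical consumer.
 * atomic_add_unless() (built on __atomic_add_unless() in <linux/atomic.h>)
 * reports whether the add actually happened, so a reference is only taken
 * while the refcount is still nonzero; cmpxchg retries the whole
 * read/compare/add sequence whenever the counter changed underneath.
 */
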
#define atomic_inc_not_zero(v)          atomic_add_unless((v), 1, 0)

#define atomic_inc(v)                   atomic_add(1, v)
#define atomic_dec(v)                   atomic_sub(1, v)

#define atomic_inc_and_test(v)          (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)            atomic_add_return(1, (v))
#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)       (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)       (atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)                  { (i) }

#include <asm-generic/atomic64.h>

#endif  /* !__ASSEMBLY__ */

#endif  /* _ASM_ARC_ATOMIC_H */