#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */

/*
 * Non-existent functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
        __compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
        __compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
        __compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
        __compiletime_error("Bad argument size for add");

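/*
 * Illustrative sketch (not part of the original header): a caller that
 * passes an operand of unsupported size never links (or fails to
 * compile, given __compiletime_error() support).  A 16-byte struct,
 * for example, matches none of the size cases used below, so the
 * expansion falls through to the default arm and calls the
 * deliberately undefined __xchg_wrong_size():
 *
 *	struct { char b[16]; } old, new;
 *	xchg(&old, new);	// error: Bad argument size for xchg
 */
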
/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B    1
#define __X86_CASE_W    2
#define __X86_CASE_L    4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q    8
#else
#define __X86_CASE_Q    -1              /* sizeof will never return -1 */
#endif

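/*
 * Sketch of the dead-code trick above (illustrative): on 32-bit,
 * sizeof() never yields -1, so a "case __X86_CASE_Q:" arm is provably
 * unreachable and the compiler deletes it, while a genuine 8-byte
 * operand finds no matching case and lands in the default arm:
 *
 *	switch (sizeof(*(ptr))) {
 *	case __X86_CASE_Q:	// -1 on 32-bit: dead code, eliminated
 *		...
 *	default:		// an 8-byte operand ends up here
 *		__cmpxchg_wrong_size();
 *	}
 */
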
/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value.
 */
#define __xchg_op(ptr, arg, op, lock)                                   \
        ({                                                              \
                __typeof__ (*(ptr)) __ret = (arg);                      \
                switch (sizeof(*(ptr))) {                               \
                case __X86_CASE_B:                                      \
                        asm volatile (lock #op "b %b0, %1\n"            \
                                      : "+q" (__ret), "+m" (*(ptr))     \
                                      : : "memory", "cc");              \
                        break;                                          \
                case __X86_CASE_W:                                      \
                        asm volatile (lock #op "w %w0, %1\n"            \
                                      : "+r" (__ret), "+m" (*(ptr))     \
                                      : : "memory", "cc");              \
                        break;                                          \
                case __X86_CASE_L:                                      \
                        asm volatile (lock #op "l %0, %1\n"             \
                                      : "+r" (__ret), "+m" (*(ptr))     \
                                      : : "memory", "cc");              \
                        break;                                          \
                case __X86_CASE_Q:                                      \
                        asm volatile (lock #op "q %q0, %1\n"            \
                                      : "+r" (__ret), "+m" (*(ptr))     \
                                      : : "memory", "cc");              \
                        break;                                          \
                default:                                                \
                        __ ## op ## _wrong_size();                      \
                }                                                       \
                __ret;                                                  \
        })

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define xchg(ptr, v)    __xchg_op((ptr), (v), xchg, "")

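/*
 * Illustrative example (not part of the original header): xchg() as a
 * simple test-and-set style acquire.  The function and variable names
 * are hypothetical.
 */
static inline int example_xchg_trylock(unsigned long *lock_word)
{
        /* Atomically store 1 and learn whether the lock was already held. */
        return xchg(lock_word, 1UL) != 0;
}
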
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)                        \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        __typeof__(*(ptr)) __old = (old);                               \
        __typeof__(*(ptr)) __new = (new);                               \
        switch (size) {                                                 \
        case __X86_CASE_B:                                              \
        {                                                               \
                volatile u8 *__ptr = (volatile u8 *)(ptr);              \
                asm volatile(lock "cmpxchgb %2,%1"                      \
                             : "=a" (__ret), "+m" (*__ptr)              \
                             : "q" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case __X86_CASE_W:                                              \
        {                                                               \
                volatile u16 *__ptr = (volatile u16 *)(ptr);            \
                asm volatile(lock "cmpxchgw %2,%1"                      \
                             : "=a" (__ret), "+m" (*__ptr)              \
                             : "r" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case __X86_CASE_L:                                              \
        {                                                               \
                volatile u32 *__ptr = (volatile u32 *)(ptr);            \
                asm volatile(lock "cmpxchgl %2,%1"                      \
                             : "=a" (__ret), "+m" (*__ptr)              \
                             : "r" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        case __X86_CASE_Q:                                              \
        {                                                               \
                volatile u64 *__ptr = (volatile u64 *)(ptr);            \
                asm volatile(lock "cmpxchgq %2,%1"                      \
                             : "=a" (__ret), "+m" (*__ptr)              \
                             : "r" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        }                                                               \
        default:                                                        \
                __cmpxchg_wrong_size();                                 \
        }                                                               \
        __ret;                                                          \
})

#define __cmpxchg(ptr, old, new, size)                                  \
        __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)                             \
        __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)                            \
        __raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif

#define cmpxchg(ptr, old, new)                                          \
        __cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)                                     \
        __sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)                                    \
        __cmpxchg_local(ptr, old, new, sizeof(*(ptr)))

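/*
 * Illustrative sketch (not part of the original header): the canonical
 * compare-and-exchange retry loop built on cmpxchg().  The function
 * name and the max-update policy are hypothetical.
 */
static inline void example_atomic_max(unsigned long *p, unsigned long val)
{
        unsigned long old = *p;

        while (old < val) {
                unsigned long seen = cmpxchg(p, old, val);
                if (seen == old)        /* our update landed */
                        break;
                old = seen;             /* lost a race: retry with the new value */
        }
}
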
/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 */
#define __xadd(ptr, inc, lock)  __xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)          __xadd((ptr), (inc), LOCK_PREFIX)

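/*
 * Illustrative example (not part of the original header): xadd() as a
 * fetch-and-add, e.g. handing out ticket numbers.  Names are
 * hypothetical.
 */
static inline unsigned int example_take_ticket(unsigned int *next_ticket)
{
        /* Atomically bump *next_ticket and return the value we claimed. */
        return xadd(next_ticket, 1U);
}
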
#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)                   \
({                                                                      \
        bool __ret;                                                     \
        __typeof__(*(p1)) __old1 = (o1), __new1 = (n1);                 \
        __typeof__(*(p2)) __old2 = (o2), __new2 = (n2);                 \
        BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));                    \
        BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));                    \
        VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));            \
        VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));    \
        asm volatile(pfx "cmpxchg%c4b %2; sete %0"                      \
                     : "=a" (__ret), "+d" (__old2),                     \
                       "+m" (*(p1)), "+m" (*(p2))                       \
                     : "i" (2 * sizeof(long)), "a" (__old1),            \
                       "b" (__new1), "c" (__new2));                     \
        __ret;                                                          \
})

#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
        __cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
        __cmpxchg_double(, p1, p2, o1, o2, n1, n2)

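/*
 * Illustrative sketch (not part of the original header): cmpxchg_double()
 * over two adjacent, suitably aligned words, e.g. a pointer-plus-tag
 * pair.  The struct and function names are hypothetical.
 */
struct example_pair {
        unsigned long ptr;              /* the lower word (p1) */
        unsigned long tag;              /* must immediately follow (p2) */
} __attribute__((__aligned__(2 * sizeof(unsigned long))));

static inline bool example_pair_update(struct example_pair *pair,
                                       unsigned long old_ptr, unsigned long old_tag,
                                       unsigned long new_ptr, unsigned long new_tag)
{
        /* True only if both words matched and were replaced atomically. */
        return cmpxchg_double(&pair->ptr, &pair->tag,
                              old_ptr, old_tag, new_ptr, new_tag);
}
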
#endif  /* ASM_X86_CMPXCHG_H */