x86/cmpxchg, locking/atomics: Remove superfluous definitions
author Nikolay Borisov <n.borisov.lkml@gmail.com>
Mon, 26 Sep 2016 18:11:18 +0000 (21:11 +0300)
committer Ingo Molnar <mingo@kernel.org>
Fri, 30 Sep 2016 08:56:01 +0000 (10:56 +0200)
cmpxchg.h contained definitions for the (x)add_* operations, dating back
to the original ticket spinlock implementation. Nowadays these are
unused, so remove them.

Signed-off-by: Nikolay Borisov <n.borisov.lkml@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: hpa@zytor.com
Link: http://lkml.kernel.org/r/1474913478-17757-1-git-send-email-n.borisov.lkml@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/cmpxchg.h

index 9733361..97848cd 100644 (file)
@@ -158,53 +158,9 @@ extern void __add_wrong_size(void)
  * value of "*ptr".
  *
  * xadd() is locked when multiple CPUs are online
- * xadd_sync() is always locked
- * xadd_local() is never locked
  */
 #define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock)
 #define xadd(ptr, inc)         __xadd((ptr), (inc), LOCK_PREFIX)
-#define xadd_sync(ptr, inc)    __xadd((ptr), (inc), "lock; ")
-#define xadd_local(ptr, inc)   __xadd((ptr), (inc), "")
-
-#define __add(ptr, inc, lock)                                          \
-       ({                                                              \
-               __typeof__ (*(ptr)) __ret = (inc);                      \
-               switch (sizeof(*(ptr))) {                               \
-               case __X86_CASE_B:                                      \
-                       asm volatile (lock "addb %b1, %0\n"             \
-                                     : "+m" (*(ptr)) : "qi" (inc)      \
-                                     : "memory", "cc");                \
-                       break;                                          \
-               case __X86_CASE_W:                                      \
-                       asm volatile (lock "addw %w1, %0\n"             \
-                                     : "+m" (*(ptr)) : "ri" (inc)      \
-                                     : "memory", "cc");                \
-                       break;                                          \
-               case __X86_CASE_L:                                      \
-                       asm volatile (lock "addl %1, %0\n"              \
-                                     : "+m" (*(ptr)) : "ri" (inc)      \
-                                     : "memory", "cc");                \
-                       break;                                          \
-               case __X86_CASE_Q:                                      \
-                       asm volatile (lock "addq %1, %0\n"              \
-                                     : "+m" (*(ptr)) : "ri" (inc)      \
-                                     : "memory", "cc");                \
-                       break;                                          \
-               default:                                                \
-                       __add_wrong_size();                             \
-               }                                                       \
-               __ret;                                                  \
-       })
-
-/*
- * add_*() adds "inc" to "*ptr"
- *
- * __add() takes a lock prefix
- * add_smp() is locked when multiple CPUs are online
- * add_sync() is always locked
- */
-#define add_smp(ptr, inc)      __add((ptr), (inc), LOCK_PREFIX)
-#define add_sync(ptr, inc)     __add((ptr), (inc), "lock; ")
 
 #define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)                  \
 ({                                                                     \