sh: support 1 and 2 byte xchg
authorMichael S. Tsirkin <mst@redhat.com>
Thu, 7 Jan 2016 15:54:54 +0000 (17:54 +0200)
committerMichael S. Tsirkin <mst@redhat.com>
Tue, 12 Jan 2016 18:47:01 +0000 (20:47 +0200)
This completes the xchg implementation for the sh architecture.  Note: the
llsc variant is tricky, since the hardware only supports 4-byte atomics;
the existing implementation of 1-byte xchg is wrong there.  We need to do
a 4-byte cmpxchg and retry if any other bytes changed in the meantime.

Write this in C for clarity.

Suggested-by: Rich Felker <dalias@libc.org>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
arch/sh/include/asm/cmpxchg-grb.h
arch/sh/include/asm/cmpxchg-irq.h
arch/sh/include/asm/cmpxchg-llsc.h
arch/sh/include/asm/cmpxchg.h

index f848dec..2ed557b 100644 (file)
@@ -23,6 +23,28 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
        return retval;
 }
 
+/*
+ * 16-bit xchg for gRB parts, written as a gUSA-style restartable atomic
+ * sequence: loading a small negative value into r15 marks the region up
+ * to label 1 as atomic, and it is restarted from the top if interrupted.
+ * NOTE(review): -6 appears to be -(byte length of the three 16-bit
+ * instructions in the critical section, mov.w/extu.w/mov.w) -- confirm
+ * against the u8 (-6) and u32 (-4) variants.
+ */
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+       unsigned long retval;
+
+       __asm__ __volatile__ (
+               "   .align  2             \n\t"
+               "   mova    1f,   r0      \n\t" /* r0 = end point */
+               "   mov    r15,   r1      \n\t" /* r1 = saved sp */
+               "   mov    #-6,   r15     \n\t" /* LOGIN */
+               "   mov.w  @%1,   %0      \n\t" /* load  old value */
+               "   extu.w  %0,   %0      \n\t" /* extend as unsigned */
+               "   mov.w   %2,   @%1     \n\t" /* store new value */
+               "1: mov     r1,   r15     \n\t" /* LOGOUT */
+               : "=&r" (retval),
+                 "+r"  (m),
+                 "+r"  (val)           /* inhibit r15 overloading */
+               :
+               : "memory" , "r0", "r1");
+
+       return retval;
+}
+
 static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
 {
        unsigned long retval;
index bd11f63..f888772 100644 (file)
@@ -14,6 +14,17 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
        return retval;
 }
 
+/*
+ * 16-bit xchg for the IRQ-disable fallback: the read-modify-write is
+ * made atomic (on UP) by keeping interrupts off across it.
+ */
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+       unsigned long flags;
+       unsigned long old;
+
+       local_irq_save(flags);
+       old = *m;
+       *m = val;
+       local_irq_restore(flags);
+
+       return old;
+}
+
 static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
 {
        unsigned long flags, retval;
index 4713666..e754794 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef __ASM_SH_CMPXCHG_LLSC_H
 #define __ASM_SH_CMPXCHG_LLSC_H
 
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
 static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 {
        unsigned long retval;
@@ -22,29 +25,8 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
        return retval;
 }
 
-static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
-{
-       unsigned long retval;
-       unsigned long tmp;
-
-       __asm__ __volatile__ (
-               "1:                                     \n\t"
-               "movli.l        @%2, %0 ! xchg_u8       \n\t"
-               "mov            %0, %1                  \n\t"
-               "mov            %3, %0                  \n\t"
-               "movco.l        %0, @%2                 \n\t"
-               "bf             1b                      \n\t"
-               "synco                                  \n\t"
-               : "=&z"(tmp), "=&r" (retval)
-               : "r" (m), "r" (val & 0xff)
-               : "t", "memory"
-       );
-
-       return retval;
-}
-
 static inline unsigned long
-__cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
+__cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
 {
        unsigned long retval;
        unsigned long tmp;
@@ -68,4 +50,36 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
        return retval;
 }
 
+/*
+ * Emulate a 1- or 2-byte xchg on top of the 4-byte cmpxchg primitive:
+ * read the containing aligned word, splice the new value into its byte
+ * lane, and retry the word-wide cmpxchg until no other byte changed
+ * underneath us.  @size is the width of the exchanged value in bytes
+ * (1 or 2).  Returns the previous value, zero-extended.
+ */
+static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
+{
+       int off = (unsigned long)ptr % sizeof(u32);
+       volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+       /*
+        * On big-endian the value's LSB sits (size + off) bytes below the
+        * top of the word, not (1 + off): with "1" a 2-byte xchg computed
+        * bitoff 24 for off 0 and shifted most of the mask out of the word.
+        */
+       int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
+#else
+       int bitoff = off * BITS_PER_BYTE;
+#endif
+       u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+       u32 oldv, newv;
+       u32 ret;
+
+       do {
+               oldv = READ_ONCE(*p);
+               ret = (oldv & bitmask) >> bitoff;
+               /*
+                * Mask the shifted value so an oversized @x cannot leak
+                * into the neighbouring bytes of the word (the old asm
+                * version did "val & 0xff" for the same reason).
+                */
+               newv = (oldv & ~bitmask) | ((x << bitoff) & bitmask);
+       } while (__cmpxchg_u32(p, oldv, newv) != oldv);
+
+       return ret;
+}
+
+/* 16-bit xchg, emulated with a word-wide cmpxchg retry loop. */
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+       return __xchg_cmpxchg(m, val, sizeof(u16));
+}
+
+/* 8-bit xchg, emulated with a word-wide cmpxchg retry loop. */
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+       return __xchg_cmpxchg(m, val, sizeof(u8));
+}
+
 #endif /* __ASM_SH_CMPXCHG_LLSC_H */
index 85c97b1..5225916 100644 (file)
@@ -27,6 +27,9 @@ extern void __xchg_called_with_bad_pointer(void);
        case 4:                                         \
                __xchg__res = xchg_u32(__xchg_ptr, x);  \
                break;                                  \
+       case 2:                                         \
+               __xchg__res = xchg_u16(__xchg_ptr, x);  \
+               break;                                  \
        case 1:                                         \
                __xchg__res = xchg_u8(__xchg_ptr, x);   \
                break;                                  \