Pull battery into release branch
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 569ec75..50c2b83 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 
 #include <asm/intrinsics.h>
+#include <asm/system.h>
 
 /*
  * On IA-64, counter must always be volatile to ensure that the
@@ -54,7 +55,7 @@ ia64_atomic64_add (__s64 i, atomic64_t *v)
 
        do {
                CMPXCHG_BUGCHECK(v);
-               old = atomic_read(v);
+               old = atomic64_read(v);
                new = old + i;
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
        return new;
@@ -82,31 +83,53 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
 
        do {
                CMPXCHG_BUGCHECK(v);
-               old = atomic_read(v);
+               old = atomic64_read(v);
                new = old - i;
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
        return new;
 }
 
-#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u)                             \
-({                                                             \
-       int c, old;                                             \
-       c = atomic_read(v);                                     \
-       for (;;) {                                              \
-               if (unlikely(c == (u)))                         \
-                       break;                                  \
-               old = atomic_cmpxchg((v), c, c + (a));          \
-               if (likely(old == c))                           \
-                       break;                                  \
-               c = old;                                        \
-       }                                                       \
-       c != (u);                                               \
-})
+#define atomic64_cmpxchg(v, old, new) \
+       (cmpxchg(&((v)->counter), old, new))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+       long c, old;
+       c = atomic64_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic64_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
 #define atomic_add_return(i,v)                                         \
 ({                                                                     \
        int __ia64_aar_i = (i);                                         \