x86: Define atomic_{read,write}{8,16,32,64} accessor functions.
author: Keir Fraser <keir@xen.org>
Thu, 16 Dec 2010 19:29:08 +0000 (19:29 +0000)
committer: Keir Fraser <keir@xen.org>
Thu, 16 Dec 2010 19:29:08 +0000 (19:29 +0000)
These absolutely guarantee to read/write a uint*_t with a single atomic
processor instruction.

Also re-define atomic_read/atomic_write (act on atomic_t) similarly.

Signed-off-by: Keir Fraser <keir@xen.org>
xen/include/asm-x86/atomic.h
xen/include/asm-x86/x86_32/system.h
xen/include/asm-x86/x86_64/system.h

index 8fc7436462e9378836c7b716de2e6e8f6cee703f..30ead03ce73829e154f2aa2d266b32b83306116e 100644 (file)
@@ -4,6 +4,46 @@
 #include <xen/config.h>
 #include <asm/system.h>
 
+/*
+ * build_atomic_read(name, size, type, reg, barrier):
+ * Emits a static inline accessor that reads a <type> from memory with a
+ * single "mov<size>" instruction, so the load is performed atomically.
+ * <reg> is the asm output constraint for the destination register and
+ * <barrier> optionally appends a clobber list to the asm statement.
+ */
+#define build_atomic_read(name, size, type, reg, barrier) \
+static inline type name(const volatile type *addr) \
+{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
+:"m" (*(volatile type *)addr) barrier); return ret; }
+
+/*
+ * build_atomic_write(name, size, type, reg, barrier):
+ * Emits a static inline accessor that writes a <type> to memory with a
+ * single "mov<size>" instruction, so the store is performed atomically.
+ * <reg> is the asm input constraint for the source operand.
+ */
+#define build_atomic_write(name, size, type, reg, barrier) \
+static inline void name(volatile type *addr, type val) \
+{ asm volatile("mov" size " %0,%1": :reg (val), \
+"m" (*(volatile type *)addr) barrier); }
+
+/*
+ * Instantiate the accessors.  A naturally aligned MOV of up to the
+ * machine word size is architecturally guaranteed atomic on x86.
+ * NB. "=q"/"q" for the 8-bit variants: on i386 only %al/%bl/%cl/%dl
+ * are byte-addressable, so the operand must be constrained to those.
+ */
+build_atomic_read(atomic_read8, "b", uint8_t, "=q", )
+build_atomic_read(atomic_read16, "w", uint16_t, "=r", )
+build_atomic_read(atomic_read32, "l", uint32_t, "=r", )
+build_atomic_read(atomic_read_int, "l", int, "=r", )
+
+build_atomic_write(atomic_write8, "b", uint8_t, "q", )
+build_atomic_write(atomic_write16, "w", uint16_t, "r", )
+build_atomic_write(atomic_write32, "l", uint32_t, "r", )
+build_atomic_write(atomic_write_int, "l", int, "r", )
+
+#ifdef __x86_64__
+/* 64-bit MOV is a single instruction on x86-64, hence atomic. */
+build_atomic_read(atomic_read64, "q", uint64_t, "=r", )
+build_atomic_write(atomic_write64, "q", uint64_t, "r", )
+#else
+/*
+ * On 32-bit x86 a plain 64-bit load is not a single instruction, so
+ * read via CMPXCHG8B with old == new == 0: the compare fails (or, if
+ * the value really is 0, rewrites 0 -- a no-op either way) and the
+ * current 64-bit contents are returned atomically.
+ */
+static inline uint64_t atomic_read64(const volatile uint64_t *addr)
+{
+    uint64_t *__addr = (uint64_t *)addr;
+    return __cmpxchg8b(__addr, 0, 0);
+}
+/*
+ * On 32-bit x86 a 64-bit store is not a single instruction: loop on
+ * CMPXCHG8B until our value replaces whatever is currently in memory.
+ * __cmpxchg8b() returns the value it found at *addr; if that differs
+ * from the 'old' we expected, another CPU updated the word under us --
+ * retry, expecting the value actually observed.
+ *
+ * NB. the comparison must be "(new = __cmpxchg8b(...)) != old": the
+ * original "(old = ...) != old" both compared old against itself
+ * (never retrying, so a contended store was silently lost) and read
+ * the uninitialized 'new' in the loop body.
+ */
+static inline void atomic_write64(volatile uint64_t *addr, uint64_t val)
+{
+    uint64_t old = *addr, new, *__addr = (uint64_t *)addr;
+    while ( (new = __cmpxchg8b(__addr, old, val)) != old )
+        old = new;
+}
+#endif
+
+#undef build_atomic_read
+#undef build_atomic_write
+
 /*
  * NB. I've pushed the volatile qualifier into the operations. This allows
  * fast accessors such as _atomic_read() and _atomic_set() which don't give
@@ -20,7 +60,7 @@ typedef struct { int counter; } atomic_t;
  * Atomically reads the value of @v.
  */
 #define _atomic_read(v)  ((v).counter)
-#define atomic_read(v)   (*(volatile int *)&((v)->counter))
+#define atomic_read(v)   atomic_read_int(&((v)->counter))
 
 /**
  * atomic_set - set atomic variable
@@ -30,7 +70,7 @@ typedef struct { int counter; } atomic_t;
  * Atomically sets the value of @v to @i.
  */ 
 #define _atomic_set(v,i) (((v).counter) = (i))
-#define atomic_set(v,i)  (*(volatile int *)&((v)->counter) = (i))
+#define atomic_set(v,i)  atomic_write_int(&((v)->counter), (i))
 
 /**
  * atomic_add - add integer to atomic variable
index 0ec103d449a10baea642d579d6f4d745e548444a..40aa63c1c771e29619ffbefd451a526ded556eb7 100644 (file)
@@ -91,13 +91,6 @@ static always_inline unsigned long long __cmpxchg8b(
     _rc;                                                                \
 })
 
-static inline void atomic_write64(uint64_t *p, uint64_t v)
-{
-    uint64_t w = *p, x;
-    while ( (x = __cmpxchg8b(p, w, v)) != w )
-        w = x;
-}
-
 #define mb()                    \
     asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
 
index 3d0a294e3a58da31bda571628eb6968406c001a0..4f183c053523d0a76307239df9289935db8da666 100644 (file)
     _rc;                                                                \
 })
 
-static inline void atomic_write64(uint64_t *p, uint64_t v)
-{
-    *p = v;
-}
-
 #define mb()                    \
     asm volatile ( "mfence" : : : "memory" )