atomic: Define {read,write}_atomic() for reading/writing memory atomically.
author: Keir Fraser <keir@xen.org>
Fri, 25 Nov 2011 13:31:58 +0000 (13:31 +0000)
committer: Keir Fraser <keir@xen.org>
Fri, 25 Nov 2011 13:31:58 +0000 (13:31 +0000)
Signed-off-by: Keir Fraser <keir@xen.org>
xen/arch/x86/mm.c
xen/arch/x86/mm/p2m-ept.c
xen/arch/x86/x86_32/seg_fixup.c
xen/common/timer.c
xen/include/asm-ia64/linux-xen/asm/atomic.h
xen/include/asm-x86/atomic.h
xen/include/asm-x86/x86_32/page.h
xen/include/asm-x86/x86_64/page.h

index 8b4ab90d21b79772299543dc4446967410dd3f7d..b00c277ac9304d854a68382a69ea35cb2d2662e9 100644 (file)
@@ -4629,7 +4629,7 @@ long do_update_descriptor(u64 pa, u64 desc)
 
     /* All is good so make the update. */
     gdt_pent = map_domain_page(mfn);
-    atomic_write64((uint64_t *)&gdt_pent[offset], *(uint64_t *)&d);
+    write_atomic((uint64_t *)&gdt_pent[offset], *(uint64_t *)&d);
     unmap_domain_page(gdt_pent);
 
     put_page_type(page);
index 9ccf14617f101b5ff12413274dc914b846d7fade..a994364cfd42ead49fd81f03aabc9b2d0f052175 100644 (file)
@@ -35,9 +35,9 @@
 #include "mm-locks.h"
 
 #define atomic_read_ept_entry(__pepte)                              \
-    ( (ept_entry_t) { .epte = atomic_read64(&(__pepte)->epte) } )
+    ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } )
 #define atomic_write_ept_entry(__pepte, __epte)                     \
-    atomic_write64(&(__pepte)->epte, (__epte).epte)
+    write_atomic(&(__pepte)->epte, (__epte).epte)
 
 #define is_epte_present(ept_entry)      ((ept_entry)->epte & 0x7)
 #define is_epte_superpage(ept_entry)    ((ept_entry)->sp)
index 13cf50e6882a8a214b541e9d266c57cad2ed20fa..7baa963afbbb2da23c28131f6bb62a828ebb2732 100644 (file)
@@ -314,7 +314,7 @@ static int fixup_seg(u16 seg, unsigned long offset)
     b &= ~0xf0000; b |= limit & 0xf0000;
     b ^= _SEGMENT_EC; /* grows-up <-> grows-down */
     /* NB. This can't fault. Checked readable above; must also be writable. */
-    atomic_write64((uint64_t *)&table[2*idx], ((uint64_t)b<<32) | a);
+    write_atomic((uint64_t *)&table[2*idx], ((uint64_t)b<<32) | a);
     return 1;
 }
 
index 0547ea31a75798e3682663ca5b41189a88531f66..0dd24764834eeb1ef37dacdc139aa0d644b4e29c 100644 (file)
@@ -239,7 +239,7 @@ static inline bool_t timer_lock(struct timer *timer)
 
     for ( ; ; )
     {
-        cpu = atomic_read16(&timer->cpu);
+        cpu = read_atomic(&timer->cpu);
         if ( unlikely(cpu == TIMER_CPU_status_killed) )
         {
             rcu_read_unlock(&timer_cpu_read_lock);
@@ -292,7 +292,7 @@ void init_timer(
     memset(timer, 0, sizeof(*timer));
     timer->function = function;
     timer->data = data;
-    atomic_write16(&timer->cpu, cpu);
+    write_atomic(&timer->cpu, cpu);
     timer->status = TIMER_STATUS_inactive;
     if ( !timer_lock_irqsave(timer, flags) )
         BUG();
@@ -343,7 +343,7 @@ void migrate_timer(struct timer *timer, unsigned int new_cpu)
 
     for ( ; ; )
     {
-        old_cpu = atomic_read16(&timer->cpu);
+        old_cpu = read_atomic(&timer->cpu);
         if ( (old_cpu == new_cpu) || (old_cpu == TIMER_CPU_status_killed) )
         {
             rcu_read_unlock(&timer_cpu_read_lock);
@@ -375,7 +375,7 @@ void migrate_timer(struct timer *timer, unsigned int new_cpu)
         deactivate_timer(timer);
 
     list_del(&timer->inactive);
-    atomic_write16(&timer->cpu, new_cpu);
+    write_atomic(&timer->cpu, new_cpu);
     list_add(&timer->inactive, &per_cpu(timers, new_cpu).inactive);
 
     if ( active )
@@ -402,7 +402,7 @@ void kill_timer(struct timer *timer)
     list_del(&timer->inactive);
     timer->status = TIMER_STATUS_killed;
     old_cpu = timer->cpu;
-    atomic_write16(&timer->cpu, TIMER_CPU_status_killed);
+    write_atomic(&timer->cpu, TIMER_CPU_status_killed);
 
     spin_unlock_irqrestore(&per_cpu(timers, old_cpu).lock, flags);
 
@@ -573,7 +573,7 @@ static void migrate_timers_from_cpu(unsigned int old_cpu)
              ? old_ts->heap[1] : old_ts->list) != NULL )
     {
         remove_entry(t);
-        atomic_write16(&t->cpu, new_cpu);
+        write_atomic(&t->cpu, new_cpu);
         notify |= add_entry(t);
     }
 
@@ -581,7 +581,7 @@ static void migrate_timers_from_cpu(unsigned int old_cpu)
     {
         t = list_entry(old_ts->inactive.next, struct timer, inactive);
         list_del(&t->inactive);
-        atomic_write16(&t->cpu, new_cpu);
+        write_atomic(&t->cpu, new_cpu);
         list_add(&t->inactive, &new_ts->inactive);
     }
 
index 6849181eb24a3284b0992b62c4659272b8615a5d..c006ae28ac420bac24b79207d3a09a3e14773668 100644 (file)
@@ -39,8 +39,8 @@ typedef struct { volatile __s64 counter; } atomic64_t;
 #define ATOMIC_INIT(i)         { (i) }
 #define ATOMIC64_INIT(i)       { (i) }
 
-#define build_atomic_read(tag, type) \
-static inline type atomic_read##tag(const volatile type *addr) \
+#define build_read_atomic(tag, type) \
+static inline type read_##tag##_atomic(const volatile type *addr) \
 { \
        type ret; \
        asm volatile("ld%2.acq %0 = %1" \
@@ -49,37 +49,62 @@ static inline type atomic_read##tag(const volatile type *addr) \
        return ret; \
 }
 
-#define build_atomic_write(tag, type) \
-static inline void atomic_write##tag(volatile type *addr, type val) \
+#define build_write_atomic(tag, type) \
+static inline void write_##tag##_atomic(volatile type *addr, type val) \
 { \
        asm volatile("st%2.rel %0 = %1" \
                     : "=m" (*addr) \
                     : "r" (val), "i" (sizeof(type))); \
 }
 
-build_atomic_read(8, uint8_t)
-build_atomic_read(16, uint16_t)
-build_atomic_read(32, uint32_t)
-build_atomic_read(64, uint64_t)
-build_atomic_read(_int, int)
-build_atomic_read(_long, long)
+build_read_atomic(u8, uint8_t)
+build_read_atomic(u16, uint16_t)
+build_read_atomic(u32, uint32_t)
+build_read_atomic(u64, uint64_t)
+
+build_write_atomic(u8, uint8_t)
+build_write_atomic(u16, uint16_t)
+build_write_atomic(u32, uint32_t)
+build_write_atomic(u64, uint64_t)
+
+#undef build_read_atomic
+#undef build_write_atomic
+
+void __bad_atomic_size(void);
+
+#define read_atomic(p) ({                                               \
+    typeof(*p) __x;                                                     \
+    switch ( sizeof(*p) ) {                                             \
+    case 1: __x = (typeof(*p))read_u8_atomic((uint8_t *)p); break;      \
+    case 2: __x = (typeof(*p))read_u16_atomic((uint16_t *)p); break;    \
+    case 4: __x = (typeof(*p))read_u32_atomic((uint32_t *)p); break;    \
+    case 8: __x = (typeof(*p))read_u64_atomic((uint64_t *)p); break;    \
+    default: __x = 0; __bad_atomic_size(); break;                       \
+    }                                                                   \
+    __x;                                                                \
+})
 
-build_atomic_write(8, uint8_t)
-build_atomic_write(16, uint16_t)
-build_atomic_write(32, uint32_t)
-build_atomic_write(64, uint64_t)
-build_atomic_write(_int, int)
-build_atomic_write(_long, long)
+#define write_atomic(p, x) ({                                           \
+    typeof(*p) __x = (x);                                               \
+    switch ( sizeof(*p) ) {                                             \
+    case 1: write_u8_atomic((uint8_t *)p, (uint8_t)__x); break;         \
+    case 2: write_u16_atomic((uint16_t *)p, (uint16_t)__x); break;      \
+    case 4: write_u32_atomic((uint32_t *)p, (uint32_t)__x); break;      \
+    case 8: write_u64_atomic((uint64_t *)p, (uint64_t)__x); break;      \
+    default: __bad_atomic_size(); break;                                \
+    }                                                                   \
+    __x;                                                                \
+})
 
 #define _atomic_read(v)                ((v).counter)
 #define _atomic64_read(v)      ((v).counter)
-#define atomic_read(v)         atomic_read_int(&((v)->counter))
-#define atomic64_read(v)       atomic_read_long(&((v)->counter))
+#define atomic_read(v)         read_atomic(&((v)->counter))
+#define atomic64_read(v)       read_atomic(&((v)->counter))
 
 #define _atomic_set(v,i)       (((v).counter) = (i))
 #define _atomic64_set(v,i)     (((v).counter) = (i))
-#define atomic_set(v,i)                atomic_write_int(&((v)->counter), i)
-#define atomic64_set(v,l)      atomic_write_long(&((v)->counter), l)
+#define atomic_set(v,i)                write_atomic(&((v)->counter), i)
+#define atomic64_set(v,l)      write_atomic(&((v)->counter), l)
 
 #endif
 
index 614a2aef51141e7f383fa42d6c5edcf91d03ca54..d13277931bbbb46f4081e6a488a1725e91a0044a 100644 (file)
@@ -4,36 +4,34 @@
 #include <xen/config.h>
 #include <asm/system.h>
 
-#define build_atomic_read(name, size, type, reg, barrier) \
+#define build_read_atomic(name, size, type, reg, barrier) \
 static inline type name(const volatile type *addr) \
 { type ret; asm volatile("mov" size " %1,%0":reg (ret) \
 :"m" (*(volatile type *)addr) barrier); return ret; }
 
-#define build_atomic_write(name, size, type, reg, barrier) \
+#define build_write_atomic(name, size, type, reg, barrier) \
 static inline void name(volatile type *addr, type val) \
 { asm volatile("mov" size " %1,%0": "=m" (*(volatile type *)addr) \
 :reg (val) barrier); }
 
-build_atomic_read(atomic_read8, "b", uint8_t, "=q", )
-build_atomic_read(atomic_read16, "w", uint16_t, "=r", )
-build_atomic_read(atomic_read32, "l", uint32_t, "=r", )
-build_atomic_read(atomic_read_int, "l", int, "=r", )
+build_read_atomic(read_u8_atomic, "b", uint8_t, "=q", )
+build_read_atomic(read_u16_atomic, "w", uint16_t, "=r", )
+build_read_atomic(read_u32_atomic, "l", uint32_t, "=r", )
 
-build_atomic_write(atomic_write8, "b", uint8_t, "q", )
-build_atomic_write(atomic_write16, "w", uint16_t, "r", )
-build_atomic_write(atomic_write32, "l", uint32_t, "r", )
-build_atomic_write(atomic_write_int, "l", int, "r", )
+build_write_atomic(write_u8_atomic, "b", uint8_t, "q", )
+build_write_atomic(write_u16_atomic, "w", uint16_t, "r", )
+build_write_atomic(write_u32_atomic, "l", uint32_t, "r", )
 
 #ifdef __x86_64__
-build_atomic_read(atomic_read64, "q", uint64_t, "=r", )
-build_atomic_write(atomic_write64, "q", uint64_t, "r", )
+build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
+build_write_atomic(write_u64_atomic, "q", uint64_t, "r", )
 #else
-static inline uint64_t atomic_read64(const volatile uint64_t *addr)
+static inline uint64_t read_u64_atomic(const volatile uint64_t *addr)
 {
     uint64_t *__addr = (uint64_t *)addr;
     return __cmpxchg8b(__addr, 0, 0);
 }
-static inline void atomic_write64(volatile uint64_t *addr, uint64_t val)
+static inline void write_u64_atomic(volatile uint64_t *addr, uint64_t val)
 {
     uint64_t old = *addr, new, *__addr = (uint64_t *)addr;
     while ( (new = __cmpxchg8b(__addr, old, val)) != old )
@@ -41,8 +39,34 @@ static inline void atomic_write64(volatile uint64_t *addr, uint64_t val)
 }
 #endif
 
-#undef build_atomic_read
-#undef build_atomic_write
+#undef build_read_atomic
+#undef build_write_atomic
+
+void __bad_atomic_size(void);
+
+#define read_atomic(p) ({                                               \
+    typeof(*p) __x;                                                     \
+    switch ( sizeof(*p) ) {                                             \
+    case 1: __x = (typeof(*p))read_u8_atomic((uint8_t *)p); break;      \
+    case 2: __x = (typeof(*p))read_u16_atomic((uint16_t *)p); break;    \
+    case 4: __x = (typeof(*p))read_u32_atomic((uint32_t *)p); break;    \
+    case 8: __x = (typeof(*p))read_u64_atomic((uint64_t *)p); break;    \
+    default: __x = 0; __bad_atomic_size(); break;                       \
+    }                                                                   \
+    __x;                                                                \
+})
+
+#define write_atomic(p, x) ({                                           \
+    typeof(*p) __x = (x);                                               \
+    switch ( sizeof(*p) ) {                                             \
+    case 1: write_u8_atomic((uint8_t *)p, (uint8_t)__x); break;         \
+    case 2: write_u16_atomic((uint16_t *)p, (uint16_t)__x); break;      \
+    case 4: write_u32_atomic((uint32_t *)p, (uint32_t)__x); break;      \
+    case 8: write_u64_atomic((uint64_t *)p, (uint64_t)__x); break;      \
+    default: __bad_atomic_size(); break;                                \
+    }                                                                   \
+    __x;                                                                \
+})
 
 /*
  * NB. I've pushed the volatile qualifier into the operations. This allows
@@ -60,7 +84,7 @@ typedef struct { int counter; } atomic_t;
  * Atomically reads the value of @v.
  */
 #define _atomic_read(v)  ((v).counter)
-#define atomic_read(v)   atomic_read_int(&((v)->counter))
+#define atomic_read(v)   read_atomic(&((v)->counter))
 
 /**
  * atomic_set - set atomic variable
@@ -70,7 +94,7 @@ typedef struct { int counter; } atomic_t;
  * Atomically sets the value of @v to @i.
  */ 
 #define _atomic_set(v,i) (((v).counter) = (i))
-#define atomic_set(v,i)  atomic_write_int(&((v)->counter), (i))
+#define atomic_set(v,i)  write_atomic(&((v)->counter), (i))
 
 /**
  * atomic_add - add integer to atomic variable
index 8dd4310b274e6aa9e387d20bb42560cedba2c6d0..9b2c7733cfd103fb485fb0d2e38c2933f093edf4 100644 (file)
@@ -85,15 +85,15 @@ extern unsigned int PAGE_HYPERVISOR_NOCACHE;
 
 #endif
 
-#define pte_read_atomic(ptep)       atomic_read64(ptep)
-#define pte_write_atomic(ptep, pte) atomic_write64(ptep, pte)
-#define pte_write(ptep, pte) do {                             \
-    u32 *__ptep_words = (u32 *)(ptep);                        \
-    atomic_write32(&__ptep_words[0], 0);                      \
-    wmb();                                                    \
-    atomic_write32(&__ptep_words[1], (pte) >> 32);            \
-    wmb();                                                    \
-    atomic_write32(&__ptep_words[0], (pte) >>  0);            \
+#define pte_read_atomic(ptep)       read_atomic(ptep)
+#define pte_write_atomic(ptep, pte) write_atomic(ptep, pte)
+#define pte_write(ptep, pte) do {                   \
+    u32 *__ptep_words = (u32 *)(ptep);              \
+    write_atomic(&__ptep_words[0], 0);              \
+    wmb();                                          \
+    write_atomic(&__ptep_words[1], (pte) >> 32);    \
+    wmb();                                          \
+    write_atomic(&__ptep_words[0], (pte) >>  0);    \
 } while ( 0 )
 
 /* root table */
index 82397c4d60faafed0f211315fc6a04f47f7572f4..2f46ba2c5a15b40de59bc94498a582f535ea0aa4 100644 (file)
@@ -116,9 +116,9 @@ typedef l4_pgentry_t root_pgentry_t;
 
 #endif /* !__ASSEMBLY__ */
 
-#define pte_read_atomic(ptep)       atomic_read64(ptep)
-#define pte_write_atomic(ptep, pte) atomic_write64(ptep, pte)
-#define pte_write(ptep, pte)        atomic_write64(ptep, pte)
+#define pte_read_atomic(ptep)       read_atomic(ptep)
+#define pte_write_atomic(ptep, pte) write_atomic(ptep, pte)
+#define pte_write(ptep, pte)        write_atomic(ptep, pte)
 
 /* Given a virtual address, get an entry offset into a linear page table. */
 #define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> L1_PAGETABLE_SHIFT)