From: David Vrabel
Date: Fri, 8 May 2015 08:59:26 +0000 (+0200)
Subject: x86: provide arch_fetch_and_add()
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~3321
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=2bfc9fc52ce8485fa43e79bbdc32360c74e12fe8;p=xen.git

x86: provide arch_fetch_and_add()

arch_fetch_and_add() atomically adds a value and returns the previous
value.

This is needed to implement ticket locks.

Signed-off-by: David Vrabel
---

diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index 71113295bd..efe721cb44 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -117,6 +117,52 @@ static always_inline unsigned long __cmpxchg(
                                    (unsigned long)__n,sizeof(*(ptr)))); \
 })
 
+/*
+ * Undefined symbol to cause link failure if a wrong size is used with
+ * arch_fetch_and_add().
+ */
+extern unsigned long __bad_fetch_and_add_size(void);
+
+static always_inline unsigned long __xadd(
+    volatile void *ptr, unsigned long v, int size)
+{
+    switch ( size )
+    {
+    case 1:
+        asm volatile ( "lock; xaddb %b0,%1"
+                       : "+r" (v), "+m" (*__xg(ptr))
+                       :: "memory");
+        return v;
+    case 2:
+        asm volatile ( "lock; xaddw %w0,%1"
+                       : "+r" (v), "+m" (*__xg(ptr))
+                       :: "memory");
+        return v;
+    case 4:
+        asm volatile ( "lock; xaddl %k0,%1"
+                       : "+r" (v), "+m" (*__xg(ptr))
+                       :: "memory");
+        return v;
+    case 8:
+        asm volatile ( "lock; xaddq %q0,%1"
+                       : "+r" (v), "+m" (*__xg(ptr))
+                       :: "memory");
+
+        return v;
+    default:
+        return __bad_fetch_and_add_size();
+    }
+}
+
+/*
+ * Atomically add @v to the 1, 2, 4, or 8 byte value at @ptr. Returns
+ * the previous value.
+ *
+ * This is a full memory barrier.
+ */
+#define arch_fetch_and_add(ptr, v) \
+    ((typeof(*(ptr)))__xadd(ptr, (typeof(*(ptr)))(v), sizeof(*(ptr))))
+
 /*
  * Both Intel and AMD agree that, from a programmer's viewpoint:
  *  Loads cannot be reordered relative to other loads.
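
For illustration only (this is not part of the patch): the ticket-lock use
case mentioned in the commit message would use arch_fetch_and_add() roughly
as in the sketch below. The ticket_lock_t layout, the ticket_lock() and
ticket_unlock() helpers, and the local cpu_relax() are assumptions made for
this example; Xen's actual ticket spinlock implementation differs in detail.

#include <stdint.h>

/* Spin-wait hint; Xen has its own cpu_relax(), this one is a stand-in. */
static inline void cpu_relax(void)
{
    asm volatile ( "rep; nop" ::: "memory" );
}

/*
 * Hypothetical ticket lock: head is the ticket currently being served,
 * tail is the next ticket to hand out.
 */
typedef struct {
    volatile uint16_t head;
    volatile uint16_t tail;
} ticket_lock_t;

static inline void ticket_lock(ticket_lock_t *lock)
{
    /* One atomic step hands this CPU a unique ticket: the old tail. */
    uint16_t ticket = arch_fetch_and_add(&lock->tail, 1);

    /* Wait until the ticket being served matches ours (FIFO ordering). */
    while ( lock->head != ticket )
        cpu_relax();
}

static inline void ticket_unlock(ticket_lock_t *lock)
{
    /*
     * Only the lock holder writes head, so a plain increment suffices
     * here; a production implementation would also add a compiler
     * barrier to get release semantics.
     */
    lock->head++;
}

Because the lock-prefixed xadd is a full memory barrier (as the comment in
the patch notes), the acquire side of this sketch needs no additional
fencing on x86.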