Hard-code the "lock" prefix in the atomic, bitop and cmpxchg inline assembly and remove the now-unused LOCK/LOCK_PREFIX macros: we don't support !CONFIG_SMP, so the prefix is always required.
Signed-off-by: Keir Fraser <keir@xen.org>
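
Illustration (not part of the patch): after this change every locked operation carries the "lock" prefix unconditionally. A stand-alone sketch of the resulting atomic_add(), assuming the usual { int counter; } layout of atomic_t, shows the net effect and can be compiled on its own to inspect the generated code:

/* Sketch only: mirrors the post-patch atomic_add(); the atomic_t layout
 * is an assumption, everything else is taken from the hunks below. */
typedef struct { int counter; } atomic_t;

static inline void atomic_add(int i, atomic_t *v)
{
    asm volatile (
        "lock; addl %1,%0"
        : "=m" (*(volatile int *)&v->counter)
        : "ir" (i), "m" (*(volatile int *)&v->counter) );
}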
#define CLEAR_BITMAP(name,bits) \
memset(name, 0, BITS_TO_LONGS(bits)*sizeof(unsigned long))
-// FIXME?: x86-ism used in xen/mm.h
-#define LOCK_PREFIX
-
extern unsigned long total_pages;
extern unsigned long xen_pstart;
extern unsigned long xenheap_size;
#include <xen/config.h>
#include <asm/system.h>
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
/*
* NB. I've pushed the volatile qualifier into the operations. This allows
* fast accessors such as _atomic_read() and _atomic_set() which don't give
static __inline__ void atomic_add(int i, atomic_t *v)
{
asm volatile(
- LOCK "addl %1,%0"
+ "lock; addl %1,%0"
:"=m" (*(volatile int *)&v->counter)
:"ir" (i), "m" (*(volatile int *)&v->counter));
}
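
The _atomic_read()/_atomic_set() accessors mentioned in the comment above are plain, unlocked field accesses; because the volatile qualifier lives inside the locked operations rather than in the atomic_t type, they compile to ordinary loads and stores. A sketch of the usual definitions (assumed here, they are not part of this hunk):

/* Assumed accessor definitions, following the usual Xen/Linux pattern. */
#define _atomic_read(v)   ((v).counter)
#define _atomic_set(v,i)  (((v).counter) = (i))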
static __inline__ void atomic_sub(int i, atomic_t *v)
{
asm volatile(
- LOCK "subl %1,%0"
+ "lock; subl %1,%0"
:"=m" (*(volatile int *)&v->counter)
:"ir" (i), "m" (*(volatile int *)&v->counter));
}
unsigned char c;
asm volatile(
- LOCK "subl %2,%0; sete %1"
+ "lock; subl %2,%0; sete %1"
:"=m" (*(volatile int *)&v->counter), "=qm" (c)
:"ir" (i), "m" (*(volatile int *)&v->counter) : "memory");
return c;
static __inline__ void atomic_inc(atomic_t *v)
{
asm volatile(
- LOCK "incl %0"
+ "lock; incl %0"
:"=m" (*(volatile int *)&v->counter)
:"m" (*(volatile int *)&v->counter));
}
static __inline__ void atomic_dec(atomic_t *v)
{
asm volatile(
- LOCK "decl %0"
+ "lock; decl %0"
:"=m" (*(volatile int *)&v->counter)
:"m" (*(volatile int *)&v->counter));
}
unsigned char c;
asm volatile(
- LOCK "decl %0; sete %1"
+ "lock; decl %0; sete %1"
:"=m" (*(volatile int *)&v->counter), "=qm" (c)
:"m" (*(volatile int *)&v->counter) : "memory");
return c != 0;
unsigned char c;
asm volatile(
- LOCK "incl %0; sete %1"
+ "lock; incl %0; sete %1"
:"=m" (*(volatile int *)&v->counter), "=qm" (c)
:"m" (*(volatile int *)&v->counter) : "memory");
return c != 0;
unsigned char c;
asm volatile(
- LOCK "addl %2,%0; sets %1"
+ "lock; addl %2,%0; sets %1"
:"=m" (*(volatile int *)&v->counter), "=qm" (c)
:"ir" (i), "m" (*(volatile int *)&v->counter) : "memory");
return c;
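
Usage sketch for the test-and-modify helpers above (illustrative only; struct foo and release_foo() are invented for the example):

/* Hypothetical refcount drop: atomic_dec_and_test() returns true only for
 * the CPU that performs the 1 -> 0 transition, so exactly one caller frees. */
struct foo { atomic_t refcnt; /* ... */ };

static void put_foo(struct foo *f)
{
    if ( atomic_dec_and_test(&f->refcnt) )
        release_foo(f);   /* hypothetical destructor */
}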
#include <xen/config.h>
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
-#define LOCK_PREFIX ""
-#endif
-
/*
* We specify the memory operand as both input and output because the memory
* operand is both read from and written to. Since the operand is in fact a
static inline void set_bit(int nr, volatile void *addr)
{
asm volatile (
- LOCK_PREFIX
- "btsl %1,%0"
+ "lock; btsl %1,%0"
: "=m" (ADDR)
: "Ir" (nr), "m" (ADDR) : "memory");
}
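
This is the constraint scheme the comment above describes: the bit-test instruction both reads and writes the memory word, so the same location is named once as an output ("=m") and once as an input ("m"). A condensed sketch (assuming ADDR expands to *(volatile long *)addr); GCC's single read-write constraint "+m" expresses the same thing in one operand:

/* Sketch only: the equivalent read-modify-write spelled with "+m". */
static inline void set_bit_sketch(int nr, volatile void *addr)
{
    asm volatile ( "lock; btsl %1,%0"
                   : "+m" (*(volatile long *)addr)   /* read and written */
                   : "Ir" (nr) : "memory" );
}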
static inline void clear_bit(int nr, volatile void *addr)
{
asm volatile (
- LOCK_PREFIX
- "btrl %1,%0"
+ "lock; btrl %1,%0"
: "=m" (ADDR)
: "Ir" (nr), "m" (ADDR) : "memory");
}
static inline void change_bit(int nr, volatile void *addr)
{
asm volatile (
- LOCK_PREFIX
- "btcl %1,%0"
+ "lock; btcl %1,%0"
: "=m" (ADDR)
: "Ir" (nr), "m" (ADDR) : "memory");
}
int oldbit;
asm volatile (
- LOCK_PREFIX
- "btsl %2,%1\n\tsbbl %0,%0"
+ "lock; btsl %2,%1\n\tsbbl %0,%0"
: "=r" (oldbit), "=m" (ADDR)
: "Ir" (nr), "m" (ADDR) : "memory");
return oldbit;
int oldbit;
asm volatile (
- LOCK_PREFIX
- "btrl %2,%1\n\tsbbl %0,%0"
+ "lock; btrl %2,%1\n\tsbbl %0,%0"
: "=r" (oldbit), "=m" (ADDR)
: "Ir" (nr), "m" (ADDR) : "memory");
return oldbit;
int oldbit;
asm volatile (
- LOCK_PREFIX
- "btcl %2,%1\n\tsbbl %0,%0"
+ "lock; btcl %2,%1\n\tsbbl %0,%0"
: "=r" (oldbit), "=m" (ADDR)
: "Ir" (nr), "m" (ADDR) : "memory");
return oldbit;
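
Usage sketch for the locked bit operations (illustrative; pending[] and NR_EVENTS are invented, BITS_TO_LONGS is the same helper used by CLEAR_BITMAP earlier in this patch):

/* Hypothetical event-pending bitmap touched by multiple CPUs. */
#define NR_EVENTS 256
static unsigned long pending[BITS_TO_LONGS(NR_EVENTS)];

static void mark_pending(int ev)
{
    set_bit(ev, pending);                    /* locked bts */
}

static int consume_pending(int ev)
{
    /* Non-zero only for the CPU that actually cleared the bit. */
    return test_and_clear_bit(ev, pending);
}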
switch ( size )
{
case 1:
- asm volatile ( LOCK_PREFIX "cmpxchgb %b1,%2"
+ asm volatile ( "lock; cmpxchgb %b1,%2"
: "=a" (prev)
: "q" (new), "m" (*__xg((volatile void *)ptr)),
"0" (old)
: "memory" );
return prev;
case 2:
- asm volatile ( LOCK_PREFIX "cmpxchgw %w1,%2"
+ asm volatile ( "lock; cmpxchgw %w1,%2"
: "=a" (prev)
: "r" (new), "m" (*__xg((volatile void *)ptr)),
"0" (old)
return prev;
#if defined(__i386__)
case 4:
- asm volatile ( LOCK_PREFIX "cmpxchgl %1,%2"
+ asm volatile ( "lock; cmpxchgl %1,%2"
: "=a" (prev)
: "r" (new), "m" (*__xg((volatile void *)ptr)),
"0" (old)
return prev;
#elif defined(__x86_64__)
case 4:
- asm volatile ( LOCK_PREFIX "cmpxchgl %k1,%2"
+ asm volatile ( "lock; cmpxchgl %k1,%2"
: "=a" (prev)
: "r" (new), "m" (*__xg((volatile void *)ptr)),
"0" (old)
: "memory" );
return prev;
case 8:
- asm volatile ( LOCK_PREFIX "cmpxchgq %1,%2"
+ asm volatile ( "lock; cmpxchgq %1,%2"
: "=a" (prev)
: "r" (new), "m" (*__xg((volatile void *)ptr)),
"0" (old)
{
unsigned long long prev;
asm volatile (
- LOCK_PREFIX "cmpxchg8b %3"
+ "lock; cmpxchg8b %3"
: "=A" (prev)
: "c" ((u32)(new>>32)), "b" ((u32)new),
"m" (*__xg((volatile void *)ptr)), "0" (old)
*/
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
asm volatile ( \
- "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n" \
+ "1: lock; cmpxchg"_isuff" %"_oppre"2,%3\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl $1,%1\n" \
break; \
case 8: \
asm volatile ( \
- "1: " LOCK_PREFIX "cmpxchg8b %4\n" \
+ "1: lock; cmpxchg8b %4\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl $1,%1\n" \
*/
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
asm volatile ( \
- "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n" \
+ "1: lock; cmpxchg"_isuff" %"_oppre"2,%3\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl $1,%1\n" \