void mce_barrier_dec(struct mce_softirq_barrier *bar)
{
atomic_inc(&bar->outgen);
- wmb();
+ smp_wmb();
atomic_dec(&bar->val);
}
return;
atomic_inc(&bar->ingen);
gen = atomic_read(&bar->outgen);
- mb();
+ smp_mb();
atomic_inc(&bar->val);
while ( atomic_read(&bar->val) != num_online_cpus() &&
atomic_read(&bar->outgen) == gen )
{
- mb();
+ smp_mb();
mce_panic_check();
}
}
return;
atomic_inc(&bar->outgen);
gen = atomic_read(&bar->ingen);
- mb();
+ smp_mb();
atomic_dec(&bar->val);
while ( atomic_read(&bar->val) != 0 &&
atomic_read(&bar->ingen) == gen )
{
- mb();
+ smp_mb();
mce_panic_check();
}
}
ltep->mcte_prev = *procltp;
*procltp = dangling[target];
}
- wmb();
+ smp_wmb();
dangling[target] = NULL;
- wmb();
+ smp_wmb();
}
mctelem_cookie_t mctelem_consume_oldest_begin(mctelem_class_t which)
* CPU is seen by notified remote CPUs. The WRMSR contained within
* apic_icr_write() can otherwise be executed early.
*
- * The reason mb() is sufficient here is subtle: the register arguments
+ * The reason smp_mb() is sufficient here is subtle: the register arguments
* to WRMSR must depend on a memory read executed after the barrier. This
* is guaranteed by cpu_physical_id(), which reads from a global array (and
* so cannot be hoisted above the barrier even by a clever compiler).
*/
- mb();
+ smp_mb();
local_irq_save(flags);
const cpumask_t *cluster_cpus;
unsigned long flags;
- mb(); /* See above for an explanation. */
+ smp_mb(); /* See above for an explanation. */
local_irq_save(flags);
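The barrier in these two hunks works together with a data dependency: the value loaded by cpu_physical_id() feeds the WRMSR's register arguments, so the ICR write cannot be issued until the fence has completed. A rough standalone illustration of that pattern using C11 atomics (mailbox, dest_id and send_doorbell are hypothetical stand-ins, not Xen interfaces):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint32_t mailbox;     /* data the notified side will read      */
    static uint32_t dest_id[64];         /* analogue of cpu_physical_id()'s array */

    static void send_doorbell(uint32_t dest)
    {
        (void)dest;                      /* stand-in for apic_icr_write()         */
    }

    void notify_cpu(unsigned int cpu, uint32_t payload)
    {
        atomic_store_explicit(&mailbox, payload, memory_order_relaxed);

        /*
         * Full fence: everything stored above is globally visible before
         * anything issued after it.  The doorbell destination is loaded
         * from dest_id[] only after the fence, so neither the compiler
         * nor the CPU can start the notification early.
         */
        atomic_thread_fence(memory_order_seq_cst);

        send_doorbell(dest_id[cpu]);
    }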
hpet_events[i].shift = 32;
hpet_events[i].next_event = STIME_MAX;
spin_lock_init(&hpet_events[i].lock);
- wmb();
+ smp_wmb();
hpet_events[i].event_handler = handle_hpet_broadcast;
hpet_events[i].msi.msi_attrib.maskbit = 1;
{
unsigned int state = p->state;
- rmb();
+ smp_rmb();
switch ( state )
{
case STATE_IOREQ_NONE:
}
/* Make the ioreq_t visible /before/ write_pointer. */
- wmb();
+ smp_wmb();
pg->ptrs.write_pointer += qw ? 2 : 1;
/* Canonicalize read/write pointers to prevent their overflow. */
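The write barrier here is the producer half of the ring protocol: the ioreq_t slot must be globally visible before the advanced write_pointer is, and a consumer pairs this with a read barrier between observing the pointer and copying the slot. A rough single-producer/single-consumer sketch of that pairing in standalone C11 (ring layout and names are illustrative, not the actual ioreq structures; overflow handling is omitted):

    #include <stdatomic.h>
    #include <stdint.h>

    struct req { uint64_t data[2]; };

    static struct req ring[16];
    static _Atomic uint32_t write_pointer;

    /* Producer: fill the slot, then publish the new index. */
    void produce(unsigned int idx, const struct req *r)
    {
        ring[idx & 15] = *r;
        atomic_thread_fence(memory_order_release);      /* slot before index */
        atomic_store_explicit(&write_pointer, idx + 1, memory_order_relaxed);
    }

    /* Consumer: observe the index, then read the slot. */
    int consume(unsigned int idx, struct req *out)
    {
        if ( atomic_load_explicit(&write_pointer, memory_order_relaxed) == idx )
            return 0;                                   /* nothing new yet   */
        atomic_thread_fence(memory_order_acquire);      /* index before slot */
        *out = ring[idx & 15];
        return 1;
    }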
ASSERT(spin_is_locked(&desc->lock));
desc->status &= ~IRQ_MOVE_PENDING;
- wmb();
+ smp_wmb();
cpumask_copy(desc->arch.pending_mask, mask);
- wmb();
+ smp_wmb();
desc->status |= IRQ_MOVE_PENDING;
}
CPU_STATE_CALLIN, /* slave -> master: Completed phase 2 */
CPU_STATE_ONLINE /* master -> slave: Go fully online now. */
} cpu_state;
-#define set_cpu_state(state) do { mb(); cpu_state = (state); } while (0)
+#define set_cpu_state(state) do { smp_mb(); cpu_state = (state); } while (0)
void *stack_base[NR_CPUS];
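set_cpu_state() fences before publishing the new state, so whatever the other CPU will read once it observes the transition (cpu_error, for instance) is already visible; the smp_rmb() before "rc = cpu_error" further down is the matching read-side barrier. The real macro uses a full barrier; the sketch below models only that publish/observe direction with C11 atomics (names hypothetical):

    #include <stdatomic.h>

    static int cpu_error;                        /* payload: why the CPU died */
    static _Atomic int cpu_state;                /* 0 = running, 1 = dead     */

    /* Dying CPU: publish the error, then flip the state (set_cpu_state()). */
    void announce_dead(int err)
    {
        cpu_error = err;
        atomic_thread_fence(memory_order_release);   /* payload before state */
        atomic_store_explicit(&cpu_state, 1, memory_order_relaxed);
    }

    /* Controlling CPU: wait for the state, then read the error (smp_rmb()). */
    int wait_for_death(void)
    {
        while ( atomic_load_explicit(&cpu_state, memory_order_relaxed) != 1 )
            ;                                        /* spin                 */
        atomic_thread_fence(memory_order_acquire);   /* state before payload */
        return cpu_error;
    }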
for ( i = 1; i <= 5; i++ )
{
tsc_value = rdtsc_ordered();
- wmb();
+ smp_wmb();
atomic_inc(&tsc_count);
while ( atomic_read(&tsc_count) != (i<<1) )
cpu_relax();
{
while ( atomic_read(&tsc_count) != ((i<<1)-1) )
cpu_relax();
- rmb();
+ smp_rmb();
/*
* If a CPU has been physically hotplugged, we may as well write
* to its TSC in spite of X86_FEATURE_TSC_RELIABLE. The platform does
}
else if ( cpu_state == CPU_STATE_DEAD )
{
- rmb();
+ smp_rmb();
rc = cpu_error;
}
else
{
boot_error = 1;
- mb();
+ smp_mb();
if ( bootsym(trampoline_cpu_started) == 0xA5 )
/* trampoline started but...? */
printk("Stuck ??\n");
/* mark "stuck" area as not stuck */
bootsym(trampoline_cpu_started) = 0;
- mb();
+ smp_mb();
smpboot_restore_warm_reset_vector();
/* 1. Update guest kernel version. */
_u.version = u->version = version_update_begin(u->version);
- wmb();
+ smp_wmb();
/* 2. Update all other guest kernel fields. */
*u = _u;
- wmb();
+ smp_wmb();
/* 3. Update guest kernel version. */
u->version = version_update_end(u->version);
update_guest_memory_policy(v, &policy);
return false;
}
- wmb();
+ smp_wmb();
/* 2. Update all other userspace fields. */
__copy_to_guest(user_u, u, 1);
- wmb();
+ smp_wmb();
/* 3. Update userspace version. */
u->version = version_update_end(u->version);
__copy_field_to_guest(user_u, u, version);
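Steps 1-3 in the two hunks above are the writer half of a seqcount-style protocol: bump the version so readers know an update is in flight, rewrite the payload, then bump the version again, with smp_wmb() keeping the three phases ordered; a reader retries until it sees the same even version before and after copying the fields. A rough standalone sketch of both halves in C11 (structure layout and field names are illustrative only; a single writer and an initially even version are assumed):

    #include <stdatomic.h>
    #include <stdint.h>

    struct time_info {
        _Atomic uint32_t version;       /* odd while an update is in flight */
        _Atomic uint64_t field[2];      /* the data guarded by the version  */
    };

    /* Writer: the analogue of steps 1-3 above (single writer assumed). */
    void publish(struct time_info *u, uint64_t a, uint64_t b)
    {
        uint32_t v = atomic_load_explicit(&u->version, memory_order_relaxed);

        /* 1. Make the version odd: readers started from here will retry. */
        atomic_store_explicit(&u->version, v + 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);

        /* 2. Update all other fields. */
        atomic_store_explicit(&u->field[0], a, memory_order_relaxed);
        atomic_store_explicit(&u->field[1], b, memory_order_relaxed);

        /* 3. Make the version even again: the update is complete. */
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&u->version, v + 2, memory_order_relaxed);
    }

    /* Reader: retry until a stable, even version brackets the copy. */
    void snapshot(struct time_info *u, uint64_t out[2])
    {
        uint32_t v;

        do {
            while ( (v = atomic_load_explicit(&u->version,
                                              memory_order_relaxed)) & 1 )
                ;                                   /* writer in progress */
            atomic_thread_fence(memory_order_acquire);
            out[0] = atomic_load_explicit(&u->field[0], memory_order_relaxed);
            out[1] = atomic_load_explicit(&u->field[1], memory_order_relaxed);
            atomic_thread_fence(memory_order_acquire);
        } while ( v != atomic_load_explicit(&u->version, memory_order_relaxed) );
    }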
#define _set_gate(gate_addr,type,dpl,addr) \
do { \
(gate_addr)->a = 0; \
- wmb(); /* disable gate /then/ rewrite */ \
+ smp_wmb(); /* disable gate /then/ rewrite */ \
(gate_addr)->b = \
((unsigned long)(addr) >> 32); \
- wmb(); /* rewrite /then/ enable gate */ \
+ smp_wmb(); /* rewrite /then/ enable gate */ \
(gate_addr)->a = \
(((unsigned long)(addr) & 0xFFFF0000UL) << 32) | \
((unsigned long)(dpl) << 45) | \
#define _set_tssldt_desc(desc,addr,limit,type) \
do { \
(desc)[0].b = (desc)[1].b = 0; \
- wmb(); /* disable entry /then/ rewrite */ \
+ smp_wmb(); /* disable entry /then/ rewrite */ \
(desc)[0].a = \
((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF); \
(desc)[1].a = (u32)(((unsigned long)(addr)) >> 32); \
- wmb(); /* rewrite /then/ enable entry */ \
+ smp_wmb(); /* rewrite /then/ enable entry */ \
(desc)[0].b = \
((u32)(addr) & 0xFF000000U) | \
((u32)(type) << 8) | 0x8000U | \
#define smp_wmb() wmb()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)
#define local_irq_disable() asm volatile ( "cli" : : : "memory" )
#define local_irq_enable() asm volatile ( "sti" : : : "memory" )