More TLB flush fixes: run new_tlbflush_clock_period() from a softirq
(new NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ) instead of calling it directly
from the clock-tick path, so it executes with no locks held and
interrupts enabled and can take flush_lock unconditionally
(spin_lock replaces the old spin_trylock early-exit).
#include <xen/config.h>
#include <xen/sched.h>
+#include <xen/interrupt.h>
#include <asm/flushtlb.h>
u32 tlbflush_clock;
#ifdef CONFIG_SMP
if ( unlikely(((y = ny+1) & TLBCLOCK_EPOCH_MASK) == 0) )
{
- new_tlbflush_clock_period();
+ raise_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ);
y = tlbflush_clock;
break;
}
memguard_guard_range(cpu0_stack, PAGE_SIZE);
#endif
+ open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ,
+ (void *)new_tlbflush_clock_period,
+ NULL);
+
if ( opt_watchdog )
nmi_watchdog = NMI_LOCAL_APIC;
}
}
+/*
+ * NB. Must be called with no locks held and interrupts enabled.
+ * (e.g., softirq context).
+ */
void new_tlbflush_clock_period(void)
{
- /* Avoid deadlock because we might be reentering a flush_lock region. */
- if ( unlikely(!spin_trylock(&flush_lock)) )
- return;
+ spin_lock(&flush_lock);
/* Someone may acquire the lock and execute the flush before us. */
if ( ((tlbflush_clock+1) & TLBCLOCK_EPOCH_MASK) != 0 )
void raise_softirq(unsigned int nr)
{
- cpu_raise_softirq(smp_processor_id(), nr);
+ /* NOTE(review): marks softirq 'nr' pending for the local CPU;
+  * __cpu_raise_softirq is presumably the raw no-notification variant
+  * of cpu_raise_softirq -- confirm against its definition. */
+ __cpu_raise_softirq(smp_processor_id(), nr);
}
void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
AC_TIMER_SOFTIRQ,
TASKLET_SOFTIRQ,
BLKDEV_RESPONSE_SOFTIRQ,
- NET_TX_SOFTIRQ
+ NET_TX_SOFTIRQ,
+ NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ
};
/* softirq mask and active fields moved to irq_cpustat_t in