     cpumask_andnot(mask, mask, &target);
 }
 
+bool_t arch_skip_send_event_check(unsigned int cpu)
+{
+    /*
+     * This relies on softirq_pending() and mwait_wakeup() to access data
+     * on the same cache line.
+     */
+    smp_mb();
+    return !!cpumask_test_cpu(cpu, &cpuidle_mwait_flags);
+}
+
 void mwait_idle_with_hints(unsigned int eax, unsigned int ecx)
 {
     unsigned int cpu = smp_processor_id();
      * Timer deadline passing is the event on which we will be woken via
      * cpuidle_mwait_wakeup. So check it now that the location is armed.
      */
-    if ( expires > NOW() || expires == 0 )
+    if ( (expires > NOW() || expires == 0) && !softirq_pending(cpu) )
     {
         cpumask_set_cpu(cpu, &cpuidle_mwait_flags);
         __mwait(eax, ecx);
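
A note on the idle-side ordering here: by the time this check runs, the
monitored location (which, per the comment in arch_skip_send_event_check(),
shares a cache line with softirq_pending()) has already been armed, so the
added !softirq_pending(cpu) test closes the window in which a softirq raised
after the caller's last check would be slept through, while any later write
to that line terminates the MWAIT. Setting the CPU's bit in
cpuidle_mwait_flags just before __mwait() is what lets
arch_skip_send_event_check() tell raisers that their write to
softirq_pending is already enough to wake this CPU.
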
 void cpumask_raise_softirq(const cpumask_t *mask, unsigned int nr)
 {
-    int cpu;
+    unsigned int cpu, this_cpu = smp_processor_id();
     cpumask_t send_mask;
 
     cpumask_clear(&send_mask);
     for_each_cpu(cpu, mask)
-        if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
+        if ( !test_and_set_bit(nr, &softirq_pending(cpu)) &&
+             cpu != this_cpu &&
+             !arch_skip_send_event_check(cpu) )
             cpumask_set_cpu(cpu, &send_mask);
 
     smp_send_event_check_mask(&send_mask);
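
Besides the new skip logic, two small cleanups: cpu becomes unsigned int to
match the cpumask iterators, and smp_processor_id() is read once ahead of
the loop instead of on every iteration. A CPU now lands in send_mask only if
its pending bit was newly set, it is not the local CPU (which will process
its own pending softirqs anyway, with no IPI needed), and it is not
advertising itself in cpuidle_mwait_flags.
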
 void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
 {
     if ( !test_and_set_bit(nr, &softirq_pending(cpu))
-         && (cpu != smp_processor_id()) )
+         && (cpu != smp_processor_id())
+         && !arch_skip_send_event_check(cpu) )
         smp_send_event_check_cpu(cpu);
 }
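
Taken together, the two sides form a small wakeup protocol: the sleeper arms
a monitor on the cache line holding its softirq_pending word and advertises
that fact, and the raiser sets the pending bit and may then skip the IPI.
Below is a stand-alone user-space C11 sketch of that protocol, purely
illustrative and not Xen code: a spin loop stands in for MONITOR/MWAIT,
atomic_fetch_or for test_and_set_bit(), a seq_cst fence for smp_mb(), and
the names idle_cpu, raise_softirq_to_idle, mwaiting, etc. are invented for
the example (build with something like cc -pthread).

#define _POSIX_C_SOURCE 199309L   /* for nanosleep() */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_uint pending;   /* models softirq_pending(cpu)             */
static atomic_bool mwaiting;  /* models one bit of cpuidle_mwait_flags   */
static atomic_int  ipis_sent; /* counts smp_send_event_check_cpu() calls */

static void *idle_cpu(void *arg)
{
    (void)arg;
    /*
     * mwait_idle_with_hints(): re-check pending work after "arming the
     * monitor" (the spin loop below re-reads the word, which is what
     * MONITOR/MWAIT give the real code in hardware).
     */
    if ( !atomic_load(&pending) )             /* !softirq_pending(cpu) */
    {
        atomic_store(&mwaiting, true);        /* cpumask_set_cpu()     */
        while ( !atomic_load(&pending) )      /* __mwait()             */
            ;
        atomic_store(&mwaiting, false);       /* cpumask_clear_cpu()   */
    }
    return NULL;
}

static void raise_softirq_to_idle(void)
{
    if ( atomic_fetch_or(&pending, 1) )       /* test_and_set_bit()    */
        return;                               /* was already pending   */
    atomic_thread_fence(memory_order_seq_cst);             /* smp_mb() */
    if ( !atomic_load(&mwaiting) )      /* arch_skip_send_event_check() */
        atomic_fetch_add(&ipis_sent, 1); /* smp_send_event_check_cpu() */
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, idle_cpu, NULL);
    nanosleep(&(struct timespec){ .tv_nsec = 50 * 1000 * 1000 }, NULL);
    raise_softirq_to_idle();
    pthread_join(t, NULL);
    printf("IPIs sent: %d (0 means the pending write alone woke the sleeper)\n",
           atomic_load(&ipis_sent));
    return 0;
}

The interesting interleaving is the raiser running between the sleeper's
pending check and its flag store: the raiser then still sees mwaiting clear
and sends a redundant but harmless IPI, while the sleeper's spin loop (like
a MWAIT whose monitored line was already written) falls straight through.
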
 #define NR_ARCH_SOFTIRQS 0
 
+#define arch_skip_send_event_check(cpu) 0
+
 #endif /* __ASM_SOFTIRQ_H__ */
 
 /*
  * Local variables:
 #define PCI_SERR_SOFTIRQ (NR_COMMON_SOFTIRQS + 4)
 #define NR_ARCH_SOFTIRQS 5
 
+bool_t arch_skip_send_event_check(unsigned int cpu);
+
 #endif /* __ASM_SOFTIRQ_H__ */
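
The two header stubs keep the common code portable: the first defines
arch_skip_send_event_check(cpu) as a constant 0, so on architectures without
this optimization the compiler folds !arch_skip_send_event_check(cpu) to
true and the raise paths compile down to exactly the old behavior, while the
x86 header declares the real function added in the first hunk.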