    hvm_dpci_msi_eoi(current->domain, vector);
}
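+/*
+ * Does the ICR destination (shorthand, destination ID, and mode) address
+ * more than one vCPU?  Used to decide whether batching the resulting
+ * softirq IPIs is worthwhile.
+ */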
+static bool_t is_multicast_dest(struct vlapic *vlapic, unsigned int short_hand,
+                                uint32_t dest, bool_t dest_mode)
+{
+    if ( vlapic_domain(vlapic)->max_vcpus <= 2 )
+        return 0;
+
+    if ( short_hand )
+        return short_hand != APIC_DEST_SELF;
+
+    if ( vlapic_x2apic_mode(vlapic) )
+        return dest_mode ? hweight16(dest) > 1 : dest == 0xffffffff;
+
+    if ( dest_mode )
+        return hweight8(dest &
+                        GET_xAPIC_DEST_FIELD(vlapic_get_reg(vlapic,
+                                                            APIC_DFR))) > 1;
+
+    return dest == 0xff;
+}
+
void vlapic_ipi(
    struct vlapic *vlapic, uint32_t icr_low, uint32_t icr_high)
{
    default: {
        struct vcpu *v;
+        bool_t batch = is_multicast_dest(vlapic, short_hand, dest, dest_mode);
+
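+        /*
+         * For a multi-destination IPI, defer the event-check IPIs raised
+         * while kicking the target vCPUs until the scan below completes.
+         */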
+        if ( batch )
+            cpu_raise_softirq_batch_begin();
        for_each_vcpu ( vlapic_domain(vlapic), v )
        {
            if ( vlapic_match_dest(vcpu_vlapic(v), vlapic,
                                   short_hand, dest, dest_mode) )
                vlapic_accept_irq(v, icr_low);
        }
+        if ( batch )
+            cpu_raise_softirq_batch_finish();
        break;
    }
}
static softirq_handler softirq_handlers[NR_SOFTIRQS];
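+/* Per-CPU softirq IPI batching state: nesting count and deferred CPU mask. */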
+static DEFINE_PER_CPU(cpumask_t, batch_mask);
+static DEFINE_PER_CPU(unsigned int, batching);
+
static void __do_softirq(unsigned long ignore_mask)
{
    unsigned int i, cpu;
void cpumask_raise_softirq(const cpumask_t *mask, unsigned int nr)
{
    unsigned int cpu, this_cpu = smp_processor_id();
-    cpumask_t send_mask;
+    cpumask_t send_mask, *raise_mask;
+
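+    /*
+     * While this CPU is batching (and not in interrupt context), accumulate
+     * target CPUs in its batch mask instead of sending the event-check IPI
+     * immediately.
+     */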
+    if ( !per_cpu(batching, this_cpu) || in_irq() )
+    {
+        cpumask_clear(&send_mask);
+        raise_mask = &send_mask;
+    }
+    else
+        raise_mask = &per_cpu(batch_mask, this_cpu);
-    cpumask_clear(&send_mask);
    for_each_cpu(cpu, mask)
        if ( !test_and_set_bit(nr, &softirq_pending(cpu)) &&
             cpu != this_cpu &&
             !arch_skip_send_event_check(cpu) )
-            cpumask_set_cpu(cpu, &send_mask);
+            cpumask_set_cpu(cpu, raise_mask);
-    smp_send_event_check_mask(&send_mask);
+    if ( raise_mask == &send_mask )
+        smp_send_event_check_mask(raise_mask);
}
void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
{
-    if ( !test_and_set_bit(nr, &softirq_pending(cpu))
-         && (cpu != smp_processor_id())
-         && !arch_skip_send_event_check(cpu) )
+    unsigned int this_cpu = smp_processor_id();
+
+    if ( test_and_set_bit(nr, &softirq_pending(cpu))
+         || (cpu == this_cpu)
+         || arch_skip_send_event_check(cpu) )
+        return;
+
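+    /*
+     * Send the event-check IPI right away unless this CPU is batching
+     * outside of interrupt context, in which case defer it via batch_mask.
+     */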
+    if ( !per_cpu(batching, this_cpu) || in_irq() )
        smp_send_event_check_cpu(cpu);
+    else
+        set_bit(nr, &per_cpu(batch_mask, this_cpu));
+}
+
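+/* Start deferring event-check IPIs raised from this CPU; calls may nest. */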
+void cpu_raise_softirq_batch_begin(void)
+{
+    ++this_cpu(batching);
+}
+
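+/*
+ * Flush the batch: send event-check IPIs to the collected CPUs that still
+ * have softirqs pending, then drop one level of batching.
+ */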
+void cpu_raise_softirq_batch_finish(void)
+{
+    unsigned int cpu, this_cpu = smp_processor_id();
+    cpumask_t *mask = &per_cpu(batch_mask, this_cpu);
+
+    ASSERT(per_cpu(batching, this_cpu));
+    for_each_cpu ( cpu, mask )
+        if ( !softirq_pending(cpu) )
+            cpumask_clear_cpu(cpu, mask);
+    smp_send_event_check_mask(mask);
+    cpumask_clear(mask);
+    --per_cpu(batching, this_cpu);
}
void raise_softirq(unsigned int nr)