 void send_IPI_mask(const cpumask_t *mask, int vector)
 {
     bool cpus_locked = false;
-    cpumask_t *scratch = this_cpu(scratch_cpumask);
+    cpumask_t *scratch = this_cpu(send_ipi_cpumask);
 
     if ( in_irq() || in_mce_handler() || in_nmi_handler() )
     {
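The dedicated send_ipi_cpumask gives send_IPI_mask() its own per-CPU buffer instead of borrowing the general-purpose scratch_cpumask, which other code paths (including ones that interrupt this function, or that pass the scratch mask in as the mask argument) may be using at the same time. The standalone C sketch below is hypothetical and not Xen code; it only illustrates the aliasing hazard of building a helper's result in a buffer its caller may also own:

/* Hypothetical illustration (plain C, not Xen): a helper that builds its
 * result in a shared "scratch" buffer clobbers the caller's data when the
 * caller passes that very buffer as the input. */
#include <stdio.h>

static unsigned long scratch_mask;                 /* shared scratch buffer */

static unsigned long drop_self(const unsigned long *mask, unsigned int self)
{
    scratch_mask = *mask;                          /* aliases *mask below   */
    scratch_mask &= ~(1UL << self);
    return scratch_mask;
}

int main(void)
{
    scratch_mask = 0xffUL;              /* caller built its mask in scratch */
    unsigned long result = drop_self(&scratch_mask, 3);

    /* The caller's mask has been silently modified; giving the helper its
     * own buffer (the send_ipi_cpumask approach) avoids this. */
    printf("result=%#lx caller_mask=%#lx\n", result, scratch_mask);
    return 0;
}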
 DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, scratch_cpumask);
 static cpumask_t scratch_cpu0mask;
 
+DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, send_ipi_cpumask);
+static cpumask_t send_ipi_cpu0mask;
+
 cpumask_t cpu_online_map __read_mostly;
 EXPORT_SYMBOL(cpu_online_map);
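send_ipi_cpu0mask mirrors the existing scratch_cpu0mask: on builds where NR_CPUS is large, cpumask_var_t is a pointer that has to be allocated per CPU, so the boot CPU is pointed at static storage until the allocated masks are set up, which is what the later #if NR_CPUS > 2 * BITS_PER_LONG hunk does. A simplified, assumption-level sketch of that split (not the actual Xen definitions):

/* Simplified sketch, not the real Xen cpumask definitions: cpumask_var_t is
 * either an embedded array (small NR_CPUS) or a pointer needing allocation
 * (large NR_CPUS), which is why a static *_cpu0mask fallback exists for the
 * boot CPU. The constants below are illustrative assumptions. */
#define BITS_PER_LONG 64                 /* assume a 64-bit build           */
#define NR_CPUS       512

typedef struct { unsigned long bits[NR_CPUS / BITS_PER_LONG]; } cpumask_t;

#if NR_CPUS > 2 * BITS_PER_LONG
typedef cpumask_t *cpumask_var_t;        /* needs cond_alloc_cpumask_var()  */
#else
typedef cpumask_t cpumask_var_t[1];      /* embedded, nothing to allocate   */
#endif

static cpumask_t send_ipi_cpu0mask;      /* boot-CPU fallback storage       */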
         FREE_CPUMASK_VAR(per_cpu(cpu_core_mask, cpu));
         if ( per_cpu(scratch_cpumask, cpu) != &scratch_cpu0mask )
             FREE_CPUMASK_VAR(per_cpu(scratch_cpumask, cpu));
+        if ( per_cpu(send_ipi_cpumask, cpu) != &send_ipi_cpu0mask )
+            FREE_CPUMASK_VAR(per_cpu(send_ipi_cpumask, cpu));
     }
 
     cleanup_cpu_root_pgt(cpu);
     if ( !(cond_zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)) &&
            cond_zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)) &&
-           cond_alloc_cpumask_var(&per_cpu(scratch_cpumask, cpu))) )
+           cond_alloc_cpumask_var(&per_cpu(scratch_cpumask, cpu)) &&
+           cond_alloc_cpumask_var(&per_cpu(send_ipi_cpumask, cpu))) )
         goto out;
 
     rc = 0;
     cpumask_set_cpu(cpu, &cpu_present_map);
 #if NR_CPUS > 2 * BITS_PER_LONG
     per_cpu(scratch_cpumask, cpu) = &scratch_cpu0mask;
+    per_cpu(send_ipi_cpumask, cpu) = &send_ipi_cpu0mask;
 #endif
 
     get_cpu_info()->use_pv_cr3 = false;
 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_mask);
 DECLARE_PER_CPU(cpumask_var_t, cpu_core_mask);
 DECLARE_PER_CPU(cpumask_var_t, scratch_cpumask);
+DECLARE_PER_CPU(cpumask_var_t, send_ipi_cpumask);
 
 /*
  * Do we, for platform reasons, need to actually keep CPUs online when we