This is to avoid picking CPU0 for almost every such operation, resulting
in a very uneven distribution of interrupt load.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
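---
For reference, the benefit here depends on cpumask_any() returning an
arbitrary set CPU rather than always the first one (otherwise it would
be equivalent to cpumask_first()). Below is a minimal sketch of such an
implementation in Xen style, assuming get_random(), nr_cpu_ids, and the
usual cpumask helpers; the exact implementation in the tree may differ.

/*
 * Sketch only: pick a pseudo-randomly chosen set CPU from *srcp by
 * walking a random number of steps onward from the first set bit.
 */
static inline unsigned int cpumask_any_sketch(const cpumask_t *srcp)
{
    unsigned int cpu = cpumask_first(srcp);
    unsigned int w = cpumask_weight(srcp);

    if ( w > 1 && cpu < nr_cpu_ids )
        for ( w = get_random() % w; w--; )  /* 0 .. w-1 steps */
        {
            unsigned int next = cpumask_next(cpu, srcp);

            if ( next >= nr_cpu_ids )
                break;
            cpu = next;
        }

    return cpu;
}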
 unsigned int cpu_mask_to_apicid_phys(const cpumask_t *cpumask)
 {
     /* As we are using single CPU as destination, pick only one CPU here */
-    return cpu_physical_id(cpumask_first(cpumask));
+    return cpu_physical_id(cpumask_any(cpumask));
 }

 static unsigned int cpu_mask_to_apicid_x2apic_cluster(const cpumask_t *cpumask)
 {
-    unsigned int cpu = cpumask_first(cpumask);
+    unsigned int cpu = cpumask_any(cpumask);
     unsigned int dest = per_cpu(cpu_2_logical_apicid, cpu);
     const cpumask_t *cluster_cpus = per_cpu(cluster_cpus, cpu);
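Note that in the x2apic cluster variant the randomized choice has a
second effect: the picked CPU supplies both the logical APIC ID used as
the destination and, via its cluster_cpus mask, presumably the set of
cluster members considered when the final destination is formed, so the
interrupt load spreads across clusters as well as across the CPUs
within one.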