int apic1, pin1, apic2, pin2;
int vector, ret;
unsigned long flags;
- cpumask_t mask_all;
local_irq_save(flags);
vector = IRQ0_VECTOR;
clear_irq_vector(0);
- cpumask_setall(&mask_all);
- if ((ret = bind_irq_vector(0, vector, &mask_all)))
+ if ((ret = bind_irq_vector(0, vector, &cpumask_all)))
printk(KERN_ERR"..IRQ0 is not set correctly with ioapic!!!, err:%d\n", ret);
irq_desc[0].status &= ~IRQ_DISABLED;
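
The change above swaps the on-stack cpumask_t mask_all, previously filled with cpumask_setall(), for a shared read-only global cpumask_all; that removes both the on-stack copy (NR_CPUS bits, which can run to hundreds of bytes in large configurations) and the per-call fill. A minimal sketch of what such a global could look like, assuming Xen's usual cpumask_t layout (a bits[] bitmap sized by BITS_TO_LONGS(NR_CPUS)) and the GNU C range-designator extension; the real definition may differ in detail:

    /* Sketch only: one shared, read-only "all CPUs" mask that stands in
     * for a per-caller cpumask_setall() on a stack variable. */
    const cpumask_t cpumask_all = {
        .bits = { [0 ... BITS_TO_LONGS(NR_CPUS) - 1] = ~0UL }
    };
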
int sched_init_vcpu(struct vcpu *v, unsigned int processor)
{
struct domain *d = v->domain;
- cpumask_t allcpus;
-
- cpumask_setall(&allcpus);
v->processor = processor;
* domain-0 VCPUs, are pinned onto their respective physical CPUs.
*/
if ( is_idle_domain(d) || d->is_pinned )
- sched_set_affinity(v, cpumask_of(processor), &allcpus);
+ sched_set_affinity(v, cpumask_of(processor), &cpumask_all);
else
- sched_set_affinity(v, &allcpus, &allcpus);
+ sched_set_affinity(v, &cpumask_all, &cpumask_all);
/* Idle VCPUs are scheduled immediately, so don't put them in runqueue. */
if ( is_idle_domain(d) )
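
The affinity policy itself is unchanged: idle-domain and pinned (e.g. dom0) vCPUs keep a hard affinity of exactly their own physical CPU, while everything else may run anywhere; only the source of the "all CPUs" mask moves from the stack to the global. From these call sites, plus the NULL soft mask passed in the CPU-offline hunk further down, the helper's shape can be inferred roughly as follows (an assumption; the real prototype lives in Xen's scheduler code):

    /* Assumed shape, inferred from the call sites in this patch. */
    void sched_set_affinity(struct vcpu *v,
                            const cpumask_t *hard,   /* NULL: leave unchanged */
                            const cpumask_t *soft);  /* NULL: leave unchanged */
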
for_each_vcpu ( d, v )
{
spinlock_t *lock;
- cpumask_t allcpus;
vcpudata = v->sched_priv;
migrate_timer(&v->singleshot_timer, new_p);
migrate_timer(&v->poll_timer, new_p);
- cpumask_setall(&allcpus);
-
lock = vcpu_schedule_lock_irq(v);
- sched_set_affinity(v, &allcpus, &allcpus);
+ sched_set_affinity(v, &cpumask_all, &cpumask_all);
v->processor = new_p;
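
In this domain-move path the per-vCPU timers are migrated first, then, under the scheduler lock, the affinity is widened to all CPUs before v->processor is rewritten, so that the new processor is guaranteed to fall inside the hard affinity. A condensed sketch of that ordering; vcpu_schedule_unlock_irq is assumed here as the counterpart of the lock call shown above:

    lock = vcpu_schedule_lock_irq(v);
    sched_set_affinity(v, &cpumask_all, &cpumask_all);  /* widen first   */
    v->processor = new_p;                               /* then reassign */
    vcpu_schedule_unlock_irq(lock, v);                  /* assumed API   */
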
/* ... */
if ( cpumask_empty(&online_affinity) &&
cpumask_test_cpu(cpu, v->cpu_hard_affinity) )
{
- cpumask_t allcpus;
-
if ( v->affinity_broken )
{
/* The vcpu is temporarily pinned, can't move it. */
else
printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
- cpumask_setall(&allcpus);
- sched_set_affinity(v, &allcpus, NULL);
+ sched_set_affinity(v, &cpumask_all, NULL);
}
if ( v->processor != cpu )
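
Across all four hunks the substitution is the same mechanical one; only the offline path above differs, widening just the hard affinity (the vCPU's last usable CPU is going away) while the NULL soft mask leaves soft affinity untouched:

    /* Before: every caller built the mask on its own stack. */
    cpumask_t allcpus;
    cpumask_setall(&allcpus);
    sched_set_affinity(v, &allcpus, &allcpus);

    /* After: point at the single shared constant. */
    sched_set_affinity(v, &cpumask_all, &cpumask_all);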