/* Set the tmp value unconditionally, so that
* the check in the iret hypercall works. */
- cpumask_copy(st->vcpu->cpu_affinity_tmp, st->vcpu->cpu_affinity);
+ cpumask_copy(st->vcpu->cpu_hard_affinity_tmp,
+ st->vcpu->cpu_hard_affinity);
if ((cpu != st->processor)
|| (st->processor != st->vcpu->processor))
return;
/* Restore affinity. */
- if ( !cpumask_empty(curr->cpu_affinity_tmp) &&
- !cpumask_equal(curr->cpu_affinity_tmp, curr->cpu_affinity) )
+ if ( !cpumask_empty(curr->cpu_hard_affinity_tmp) &&
+ !cpumask_equal(curr->cpu_hard_affinity_tmp, curr->cpu_hard_affinity) )
{
- vcpu_set_affinity(curr, curr->cpu_affinity_tmp);
- cpumask_clear(curr->cpu_affinity_tmp);
+ vcpu_set_affinity(curr, curr->cpu_hard_affinity_tmp);
+ cpumask_clear(curr->cpu_hard_affinity_tmp);
}
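The two hunks above cooperate: the softirq handler stashes the current hard-affinity
mask in the _tmp field before the vCPU gets pinned, and the iret-path cleanup later
puts it back, but only if the stash is non-empty and actually differs from the live
mask. Below is a minimal standalone C model of that save/pin/restore flow; every name
in it (the miniature struct vcpu, pin_temporarily()) is a hypothetical stand-in, not
Xen's API.

    #include <stdio.h>

    /* Hypothetical miniature of a vCPU with hard/tmp affinity masks. */
    struct vcpu {
        unsigned long hard_affinity;      /* CPUs the vCPU may run on */
        unsigned long hard_affinity_tmp;  /* mask saved while pinned  */
    };

    /* Pin: remember the current mask, then restrict to a single CPU. */
    static void pin_temporarily(struct vcpu *v, unsigned int cpu)
    {
        v->hard_affinity_tmp = v->hard_affinity;
        v->hard_affinity = 1UL << cpu;
    }

    /* Restore: only if a non-empty stash differs from the live mask. */
    static void unpin(struct vcpu *v)
    {
        if ( v->hard_affinity_tmp != 0 &&
             v->hard_affinity_tmp != v->hard_affinity )
        {
            v->hard_affinity = v->hard_affinity_tmp;
            v->hard_affinity_tmp = 0;
        }
    }

    int main(void)
    {
        struct vcpu v = { .hard_affinity = 0xfUL };  /* CPUs 0-3 */

        pin_temporarily(&v, 2);
        unpin(&v);
        printf("affinity=%#lx tmp=%#lx\n",
               v.hard_affinity, v.hard_affinity_tmp);  /* 0xf, 0 */
        return 0;
    }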
if ( !(curr->async_exception_mask & (curr->async_exception_mask - 1)) )
tasklet_init(&v->continue_hypercall_tasklet, NULL, 0);
- if ( !zalloc_cpumask_var(&v->cpu_affinity) ||
- !zalloc_cpumask_var(&v->cpu_affinity_tmp) ||
- !zalloc_cpumask_var(&v->cpu_affinity_saved) ||
+ if ( !zalloc_cpumask_var(&v->cpu_hard_affinity) ||
+ !zalloc_cpumask_var(&v->cpu_hard_affinity_tmp) ||
+ !zalloc_cpumask_var(&v->cpu_hard_affinity_saved) ||
!zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
goto fail_free;
fail_wq:
destroy_waitqueue_vcpu(v);
fail_free:
- free_cpumask_var(v->cpu_affinity);
- free_cpumask_var(v->cpu_affinity_tmp);
- free_cpumask_var(v->cpu_affinity_saved);
+ free_cpumask_var(v->cpu_hard_affinity);
+ free_cpumask_var(v->cpu_hard_affinity_tmp);
+ free_cpumask_var(v->cpu_hard_affinity_saved);
free_cpumask_var(v->vcpu_dirty_cpumask);
free_vcpu_struct(v);
return NULL;
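The allocation and teardown hunks use the usual all-or-nothing idiom: every mask is
zero-allocated up front, and any failure jumps to a label that frees all of them,
which is safe because freeing a never-allocated (NULL) mask is a no-op. A sketch of
the same idiom in portable C with calloc()/free(); the struct and MASK_LONGS are
hypothetical.

    #include <stdlib.h>

    #define MASK_LONGS 4  /* enough mask words for this sketch */

    struct vcpu {
        unsigned long *hard_affinity;
        unsigned long *hard_affinity_tmp;
        unsigned long *hard_affinity_saved;
        unsigned long *dirty_cpumask;
    };

    /* Allocate every mask, or free whatever was allocated and fail. */
    static struct vcpu *vcpu_create(void)
    {
        struct vcpu *v = calloc(1, sizeof(*v));

        if ( v == NULL )
            return NULL;

        if ( !(v->hard_affinity       = calloc(MASK_LONGS, sizeof(long))) ||
             !(v->hard_affinity_tmp   = calloc(MASK_LONGS, sizeof(long))) ||
             !(v->hard_affinity_saved = calloc(MASK_LONGS, sizeof(long))) ||
             !(v->dirty_cpumask       = calloc(MASK_LONGS, sizeof(long))) )
            goto fail_free;

        return v;

     fail_free:
        /* free(NULL) is a defined no-op, so no allocation tracking needed. */
        free(v->hard_affinity);
        free(v->hard_affinity_tmp);
        free(v->hard_affinity_saved);
        free(v->dirty_cpumask);
        free(v);
        return NULL;
    }

    int main(void)
    {
        struct vcpu *v = vcpu_create();  /* matching teardown elided */
        return v == NULL;
    }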
for_each_vcpu ( d, v )
{
- cpumask_and(online_affinity, v->cpu_affinity, online);
+ cpumask_and(online_affinity, v->cpu_hard_affinity, online);
cpumask_or(cpumask, cpumask, online_affinity);
}
for ( i = d->max_vcpus - 1; i >= 0; i-- )
if ( (v = d->vcpu[i]) != NULL )
{
- free_cpumask_var(v->cpu_affinity);
- free_cpumask_var(v->cpu_affinity_tmp);
- free_cpumask_var(v->cpu_affinity_saved);
+ free_cpumask_var(v->cpu_hard_affinity);
+ free_cpumask_var(v->cpu_hard_affinity_tmp);
+ free_cpumask_var(v->cpu_hard_affinity_saved);
free_cpumask_var(v->vcpu_dirty_cpumask);
free_vcpu_struct(v);
}
v->async_exception_mask = 0;
memset(v->async_exception_state, 0, sizeof(v->async_exception_state));
#endif
- cpumask_clear(v->cpu_affinity_tmp);
+ cpumask_clear(v->cpu_hard_affinity_tmp);
clear_bit(_VPF_blocked, &v->pause_flags);
clear_bit(_VPF_in_reset, &v->pause_flags);
else
{
ret = cpumask_to_xenctl_bitmap(
- &op->u.vcpuaffinity.cpumap, v->cpu_affinity);
+ &op->u.vcpuaffinity.cpumap, v->cpu_hard_affinity);
}
}
break;
!vcpu_event_delivery_is_enabled(v));
cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
printk("dirty_cpus=%s ", tmpstr);
- cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_affinity);
+ cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_hard_affinity);
printk("cpu_affinity=%s\n", tmpstr);
printk(" pause_count=%d pause_flags=%lx\n",
atomic_read(&v->pause_count), v->pause_flags);
if ( step == CSCHED_BALANCE_NODE_AFFINITY )
{
cpumask_and(mask, CSCHED_DOM(vc->domain)->node_affinity_cpumask,
- vc->cpu_affinity);
+ vc->cpu_hard_affinity);
if ( unlikely(cpumask_empty(mask)) )
- cpumask_copy(mask, vc->cpu_affinity);
+ cpumask_copy(mask, vc->cpu_hard_affinity);
}
else /* step == CSCHED_BALANCE_CPU_AFFINITY */
- cpumask_copy(mask, vc->cpu_affinity);
+ cpumask_copy(mask, vc->cpu_hard_affinity);
}
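csched_balance_cpumask() is the heart of the credit scheduler's two-step load
balancing: the node-affinity step intersects the domain's node mask with the vCPU's
hard mask, falling back to the plain hard mask if the intersection is empty so the
step never yields an unrunnable (empty) set; the second step uses the hard mask
alone. A standalone model of that fallback with plain bitmasks; the enum and function
names are hypothetical.

    #include <stdio.h>

    enum balance_step { BALANCE_NODE_AFFINITY, BALANCE_CPU_AFFINITY };

    /*
     * Step 1 prefers CPUs in the domain's NUMA node(s); an empty
     * intersection falls back to the hard mask so there is always a
     * candidate. Step 2 uses the hard mask directly.
     */
    static unsigned long balance_mask(enum balance_step step,
                                      unsigned long node_mask,
                                      unsigned long hard_mask)
    {
        if ( step == BALANCE_NODE_AFFINITY )
        {
            unsigned long m = node_mask & hard_mask;
            return m ? m : hard_mask;
        }
        return hard_mask;  /* BALANCE_CPU_AFFINITY */
    }

    int main(void)
    {
        /* Hard affinity on CPUs 0-3, node affinity on CPUs 4-7:
         * the node step's intersection is empty, so it falls back. */
        printf("node step: %#lx\n",
               balance_mask(BALANCE_NODE_AFFINITY, 0xf0UL, 0x0fUL));
        printf("cpu step:  %#lx\n",
               balance_mask(BALANCE_CPU_AFFINITY, 0xf0UL, 0x0fUL));
        return 0;
    }

The __vcpu_has_node_affinity() checks in the hunks below serve the same end from the
other side: they skip the node-affinity step outright when it could not restrict the
hard mask, avoiding a redundant pass.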
static void burn_credits(struct csched_vcpu *svc, s_time_t now)
if ( balance_step == CSCHED_BALANCE_NODE_AFFINITY
&& !__vcpu_has_node_affinity(new->vcpu,
- new->vcpu->cpu_affinity) )
+ new->vcpu->cpu_hard_affinity) )
continue;
/* Are there idlers suitable for new (for this balance step)? */
/* Store in cpus the mask of online cpus on which the domain can run */
online = cpupool_scheduler_cpumask(vc->domain->cpupool);
- cpumask_and(&cpus, vc->cpu_affinity, online);
+ cpumask_and(&cpus, vc->cpu_hard_affinity, online);
for_each_csched_balance_step( balance_step )
{
* or counter.
*/
if ( balance_step == CSCHED_BALANCE_NODE_AFFINITY
- && !__vcpu_has_node_affinity(vc, vc->cpu_affinity) )
+ && !__vcpu_has_node_affinity(vc, vc->cpu_hard_affinity) )
continue;
csched_balance_cpumask(vc, balance_step, csched_balance_mask);
cpumask_t *online;
online = cpupool_scheduler_cpumask(v->domain->cpupool);
- cpumask_and(&online_affinity, v->cpu_affinity, online);
+ cpumask_and(&online_affinity, v->cpu_hard_affinity, online);
return cpumask_cycle(v->vcpu_id % cpumask_weight(&online_affinity) - 1,
&online_affinity);
}
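sedf_pick_cpu() distributes vCPUs over the online CPUs in the hard-affinity mask:
vcpu_id modulo the mask weight, minus one, is fed to cpumask_cycle(), which advances
to the next set bit with wraparound. Note that the expression divides by the weight,
so it assumes the intersection is non-empty, and the spread is only truly even when
the mask is dense starting at CPU 0. A standalone model with plain bitmasks;
next_set_bit_cyclic() and popcount() are hypothetical stand-ins for cpumask_cycle()
and cpumask_weight().

    #include <stdio.h>

    #define NBITS 8

    /* Index of the next set bit strictly after n, wrapping around. */
    static int next_set_bit_cyclic(int n, unsigned long mask)
    {
        for ( int i = 1; i <= NBITS; i++ )
        {
            int bit = (n + i + NBITS) % NBITS;  /* n may be -1 */

            if ( mask & (1UL << bit) )
                return bit;
        }
        return -1;  /* empty mask */
    }

    static int popcount(unsigned long m)
    {
        int c = 0;

        for ( ; m; m &= m - 1 )
            c++;
        return c;
    }

    /* Map vcpu_id round-robin onto the set bits of mask. */
    static int pick_cpu(int vcpu_id, unsigned long mask)
    {
        return next_set_bit_cyclic(vcpu_id % popcount(mask) - 1, mask);
    }

    int main(void)
    {
        unsigned long mask = 0x0fUL;  /* CPUs 0-3 online and allowed */

        for ( int id = 0; id < 5; id++ )
            printf("vcpu %d -> cpu %d\n", id, pick_cpu(id, mask));
        return 0;  /* 0->0, 1->1, 2->2, 3->3, 4->0 */
    }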
*/
v->processor = processor;
if ( is_idle_domain(d) || d->is_pinned )
- cpumask_copy(v->cpu_affinity, cpumask_of(processor));
+ cpumask_copy(v->cpu_hard_affinity, cpumask_of(processor));
else
- cpumask_setall(v->cpu_affinity);
+ cpumask_setall(v->cpu_hard_affinity);
/* Initialise the per-vcpu timers. */
init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
migrate_timer(&v->singleshot_timer, new_p);
migrate_timer(&v->poll_timer, new_p);
- cpumask_setall(v->cpu_affinity);
+ cpumask_setall(v->cpu_hard_affinity);
lock = vcpu_schedule_lock_irq(v);
v->processor = new_p;
*/
if ( pick_called &&
(new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
- cpumask_test_cpu(new_cpu, v->cpu_affinity) &&
+ cpumask_test_cpu(new_cpu, v->cpu_hard_affinity) &&
cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
break;
if ( v->affinity_broken )
{
printk(XENLOG_DEBUG "Restoring affinity for %pv\n", v);
- cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
+ cpumask_copy(v->cpu_hard_affinity, v->cpu_hard_affinity_saved);
v->affinity_broken = 0;
}
unsigned long flags;
spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);
- cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
+ cpumask_and(&online_affinity, v->cpu_hard_affinity, c->cpu_valid);
if ( cpumask_empty(&online_affinity) &&
- cpumask_test_cpu(cpu, v->cpu_affinity) )
+ cpumask_test_cpu(cpu, v->cpu_hard_affinity) )
{
printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
if ( system_state == SYS_STATE_suspend )
{
- cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity);
+ cpumask_copy(v->cpu_hard_affinity_saved,
+ v->cpu_hard_affinity);
v->affinity_broken = 1;
}
- cpumask_setall(v->cpu_affinity);
+ cpumask_setall(v->cpu_hard_affinity);
}
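This hunk and the restore hunk above it form a pair: when a vCPU's last online CPU
disappears, cpu_disable_scheduler() widens the mask to all CPUs, but across suspend
it first snapshots the mask into cpu_hard_affinity_saved and sets affinity_broken so
the resume path can copy it back. A minimal standalone model of the pairing; all
names are hypothetical.

    #include <stdio.h>
    #include <stdbool.h>

    #define ALL_CPUS 0xffUL

    struct vcpu {
        unsigned long hard_affinity;
        unsigned long hard_affinity_saved;
        bool affinity_broken;
    };

    /* CPU 'cpu' is going offline; 'online' is the remaining online mask. */
    static void break_affinity(struct vcpu *v, int cpu, unsigned long online,
                               bool suspending)
    {
        if ( (v->hard_affinity & online) == 0 &&
             (v->hard_affinity & (1UL << cpu)) )
        {
            if ( suspending )
            {
                v->hard_affinity_saved = v->hard_affinity;
                v->affinity_broken = true;
            }
            v->hard_affinity = ALL_CPUS;  /* let it run anywhere for now */
        }
    }

    /* On resume, put the original mask back. */
    static void restore_affinity(struct vcpu *v)
    {
        if ( v->affinity_broken )
        {
            v->hard_affinity = v->hard_affinity_saved;
            v->affinity_broken = false;
        }
    }

    int main(void)
    {
        struct vcpu v = { .hard_affinity = 1UL << 3 };  /* pinned to CPU 3 */

        break_affinity(&v, 3, 0x07UL, true);  /* CPU 3 goes away */
        printf("broken:   %#lx\n", v.hard_affinity);        /* 0xff */
        restore_affinity(&v);
        printf("restored: %#lx\n", v.hard_affinity);        /* 0x8  */
        return 0;
    }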
if ( v->processor == cpu )
lock = vcpu_schedule_lock_irq(v);
- cpumask_copy(v->cpu_affinity, affinity);
+ cpumask_copy(v->cpu_hard_affinity, affinity);
/* Always ask the scheduler to re-evaluate placement
* when changing the affinity */
/* Save current VCPU affinity; force wakeup on *this* CPU only. */
wqv->wakeup_cpu = smp_processor_id();
- cpumask_copy(&wqv->saved_affinity, curr->cpu_affinity);
+ cpumask_copy(&wqv->saved_affinity, curr->cpu_hard_affinity);
if ( vcpu_set_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
{
gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n");
{
/* Re-set VCPU affinity and re-enter the scheduler. */
struct vcpu *curr = current;
- cpumask_copy(&wqv->saved_affinity, curr->cpu_affinity);
+ cpumask_copy(&wqv->saved_affinity, curr->cpu_hard_affinity);
if ( vcpu_set_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
{
gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n");
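Both waitqueue hunks follow the same pin-then-restore discipline: remember the hard
mask, pin the vCPU to the CPU it must wake on, and treat a failed repin as a hard
error. A minimal sketch of that discipline with a fallible setter; set_affinity() and
the types here are hypothetical, not Xen's wait.c internals.

    #include <stdio.h>
    #include <stdbool.h>

    struct waiter {
        int wakeup_cpu;
        unsigned long saved_affinity;
    };

    struct task {
        unsigned long affinity;
    };

    /* Hypothetical fallible setter: reject empty masks. */
    static bool set_affinity(struct task *t, unsigned long mask)
    {
        if ( mask == 0 )
            return false;
        t->affinity = mask;
        return true;
    }

    /* Pin to the wakeup CPU, remembering what to restore later. */
    static bool prepare_to_wait(struct waiter *w, struct task *t, int this_cpu)
    {
        w->wakeup_cpu = this_cpu;
        w->saved_affinity = t->affinity;
        if ( !set_affinity(t, 1UL << w->wakeup_cpu) )
        {
            fprintf(stderr, "unable to set affinity\n");
            return false;  /* caller must bail out, as the Xen code does */
        }
        return true;
    }

    int main(void)
    {
        struct task t = { .affinity = 0xffUL };
        struct waiter w;

        if ( prepare_to_wait(&w, &t, 5) )
            printf("pinned=%#lx saved=%#lx\n", t.affinity, w.saved_affinity);
        set_affinity(&t, w.saved_affinity);  /* wakeup path: restore */
        return 0;
    }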
spinlock_t virq_lock;
/* Bitmask of CPUs on which this VCPU may run. */
- cpumask_var_t cpu_affinity;
+ cpumask_var_t cpu_hard_affinity;
/* Used to change affinity temporarily. */
- cpumask_var_t cpu_affinity_tmp;
+ cpumask_var_t cpu_hard_affinity_tmp;
/* Used to restore affinity across S3. */
- cpumask_var_t cpu_affinity_saved;
+ cpumask_var_t cpu_hard_affinity_saved;
/* Bitmask of CPUs which are holding onto this VCPU's state. */
cpumask_var_t vcpu_dirty_cpumask;
#define has_hvm_container_domain(d) ((d)->guest_type != guest_type_pv)
#define has_hvm_container_vcpu(v) (has_hvm_container_domain((v)->domain))
#define is_pinned_vcpu(v) ((v)->domain->is_pinned || \
- cpumask_weight((v)->cpu_affinity) == 1)
+ cpumask_weight((v)->cpu_hard_affinity) == 1)
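is_pinned_vcpu() treats a weight-1 hard-affinity mask as pinning, alongside the
domain-wide is_pinned flag. With a plain bitmask, "exactly one bit set" can be tested
without counting, as in this hypothetical standalone model (the struct names only
mirror the macro's fields):

    #include <stdio.h>
    #include <stdbool.h>

    struct domain { bool is_pinned; };
    struct vcpu   { struct domain *domain; unsigned long hard_affinity; };

    /* Exactly one bit set <=> non-zero and clearing the lowest bit empties it. */
    static bool is_pinned_vcpu(const struct vcpu *v)
    {
        unsigned long m = v->hard_affinity;

        return v->domain->is_pinned || (m != 0 && (m & (m - 1)) == 0);
    }

    int main(void)
    {
        struct domain d = { .is_pinned = false };
        struct vcpu v = { .domain = &d, .hard_affinity = 1UL << 2 };

        printf("%d\n", is_pinned_vcpu(&v));  /* 1: single-CPU mask */
        v.hard_affinity = 0x3;
        printf("%d\n", is_pinned_vcpu(&v));  /* 0: two CPUs */
        return 0;
    }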
#ifdef HAS_PASSTHROUGH
#define need_iommu(d) ((d)->need_iommu)
#else