static void continue_hypercall_on_cpu_helper(struct vcpu *v)
{
struct cpu_user_regs *regs = guest_cpu_user_regs();
struct migrate_info *info = v->arch.continue_info;
+ cpumask_t mask = info->saved_affinity;
regs->eax = info->func(info->data);
v->arch.schedule_tail = info->saved_schedule_tail;
- v->cpu_affinity = info->saved_affinity;
v->arch.continue_info = NULL;
xfree(info);
- vcpu_set_affinity(v, &v->cpu_affinity);
+ vcpu_unlock_affinity(v, &mask);
schedule_tail(v);
}
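
Note the ordering in the helper above: the patch copies info->saved_affinity into the on-stack mask before xfree(info), because the unlock must not read through the freed structure. A minimal sketch of the hazard the copy avoids (illustrative only, not part of the patch):

    /* WRONG: reads info->saved_affinity after the structure is freed. */
    xfree(info);
    vcpu_unlock_affinity(v, &info->saved_affinity);  /* use-after-free */
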
int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data)
{
struct vcpu *v = current;
struct migrate_info *info;
- cpumask_t mask = cpumask_of_cpu(cpu);
int rc;
if ( cpu == smp_processor_id() )
return func(data);
info = xmalloc(struct migrate_info);
if ( info == NULL )
return -ENOMEM;
info->func = func;
info->data = data;
info->saved_schedule_tail = v->arch.schedule_tail;
- info->saved_affinity = v->cpu_affinity;
+ info->saved_affinity = cpumask_of_cpu(cpu);
v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
v->arch.continue_info = info;
- rc = vcpu_set_affinity(v, &mask);
+ rc = vcpu_lock_affinity(v, &info->saved_affinity);
if ( rc )
{
v->arch.schedule_tail = info->saved_schedule_tail;
v->arch.continue_info = NULL;
xfree(info);
return rc;
}
/* Dummy return value; overwritten when the continuation runs. */
return 0;
}
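
For context, a hedged sketch of how a caller uses this primitive; the payload function and the surrounding handler are hypothetical, not taken from this patch:

    /* Hypothetical payload: runs on the target CPU once the vCPU has been
     * pinned there and rescheduled; its return value lands in regs->eax. */
    static long my_remote_work(void *data)
    {
        return 0;
    }

    /* In a hypercall handler, to finish the work on CPU 'cpu': */
    return continue_hypercall_on_cpu(cpu, my_remote_work, NULL);
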
-int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
+static int __vcpu_set_affinity(
+ struct vcpu *v, cpumask_t *affinity,
+ bool_t old_lock_status, bool_t new_lock_status)
{
- cpumask_t online_affinity;
-
- if ( (v->domain->domain_id == 0) && opt_dom0_vcpus_pin )
- return -EINVAL;
+ cpumask_t online_affinity, old_affinity;
cpus_and(online_affinity, *affinity, cpu_online_map);
if ( cpus_empty(online_affinity) )
return -EINVAL;
vcpu_schedule_lock_irq(v);
+ if ( v->affinity_locked != old_lock_status )
+ {
+ BUG_ON(!v->affinity_locked);
+ vcpu_schedule_unlock_irq(v);
+ return -EBUSY;
+ }
+
+ v->affinity_locked = new_lock_status;
+
+ old_affinity = v->cpu_affinity;
v->cpu_affinity = *affinity;
+ *affinity = old_affinity;
if ( !cpu_isset(v->processor, v->cpu_affinity) )
set_bit(_VPF_migrating, &v->pause_flags);
vcpu_schedule_unlock_irq(v);
if ( test_bit(_VPF_migrating, &v->pause_flags) )
{
vcpu_sleep_nosync(v);
vcpu_migrate(v);
}
return 0;
}
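
The swap at the end of __vcpu_set_affinity() doubles as a return channel: the caller passes the new mask in *affinity and receives the previous mask back through the same pointer. That is why continue_hypercall_on_cpu() hands in &info->saved_affinity: on success the field ends up holding exactly the affinity to restore later. A minimal sketch, assuming a valid vCPU v and target cpu:

    cpumask_t m = cpumask_of_cpu(cpu);    /* new, single-CPU affinity */
    if ( vcpu_lock_affinity(v, &m) == 0 )
    {
        /* m now holds the vCPU's previous affinity mask. */
        /* ... do the pinned work ... */
        vcpu_unlock_affinity(v, &m);      /* restores the saved mask */
    }
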
+int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
+{
+ if ( (v->domain->domain_id == 0) && opt_dom0_vcpus_pin )
+ return -EINVAL;
+ return __vcpu_set_affinity(v, affinity, 0, 0);
+}
+
+int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity)
+{
+ return __vcpu_set_affinity(v, affinity, 0, 1);
+}
+
+void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity)
+{
+ cpumask_t online_affinity;
+
+ /* Do not fail if no CPU in old affinity mask is online. */
+ cpus_and(online_affinity, *affinity, cpu_online_map);
+ if ( cpus_empty(online_affinity) )
+ *affinity = cpu_online_map;
+
+ if ( __vcpu_set_affinity(v, affinity, 1, 0) != 0 )
+ BUG();
+}
+
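
Taken together, the three entry points drive a two-state lock on v->affinity_locked. The transitions below summarize the code above; they add no new behaviour:

    /* vcpu_set_affinity():    0 -> 0; returns -EBUSY if affinity is locked. */
    /* vcpu_lock_affinity():   0 -> 1; returns -EBUSY if already locked.     */
    /* vcpu_unlock_affinity(): 1 -> 0; BUG()s if not locked. The mask is     */
    /*     forced to overlap cpu_online_map first, so the final              */
    /*     __vcpu_set_affinity() call cannot fail with -EINVAL.              */
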
/* Block the currently-executing domain until a pertinent event occurs. */
static long do_block(void)
{
bool_t defer_shutdown;
/* VCPU is paused following shutdown request (d->is_shutting_down)? */
bool_t paused_for_shutdown;
+ /* VCPU affinity is temporarily locked from controller changes? */
+ bool_t affinity_locked;
unsigned long pause_flags;
atomic_t pause_count;
void vcpu_force_reschedule(struct vcpu *v);
int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);
+int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity);
+void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity);
void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
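
Finally, a hedged sketch of the controller-facing consequence: while a vCPU's affinity is locked, the existing setter is refused rather than silently overridden. The checks are hypothetical test scaffolding (assuming a domU vCPU v), not part of the patch:

    cpumask_t m = cpumask_of_cpu(3);              /* hypothetical target  */
    BUG_ON(vcpu_lock_affinity(v, &m) != 0);       /* m := old affinity    */
    BUG_ON(vcpu_set_affinity(v, &m) != -EBUSY);   /* locked: refused      */
    vcpu_unlock_affinity(v, &m);                  /* old mask restored    */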