* Do the actual movement of a vcpu from old to new CPU. Locks for *both*
* CPUs needs to have been taken already when calling this!
*/
-static void vcpu_move(struct vcpu *v, unsigned int old_cpu,
- unsigned int new_cpu)
+static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
{
+ unsigned int old_cpu = v->processor;
+
/*
* Transfer urgency status to new CPU before switching CPUs, as
* once the switch occurs, v->is_urgent is no longer protected by
v->processor = new_cpu;
}
+/*
+ * Move a vcpu from its current processor to a target new processor,
+ * without asking the scheduler to do any placement. This is intended
+ * for being called from special contexts, where things are quiet
+ * enough that no contention is supposed to happen (i.e., during
+ * shutdown or software suspend, like ACPI S3).
+ */
+static void vcpu_move_nosched(struct vcpu *v, unsigned int new_cpu)
+{
+ unsigned long flags;
+ spinlock_t *lock, *new_lock;
+
+ ASSERT(system_state == SYS_STATE_suspend);
+ ASSERT(!vcpu_runnable(v) && (atomic_read(&v->pause_count) ||
+ atomic_read(&v->domain->pause_count)));
+
+ lock = per_cpu(schedule_data, v->processor).schedule_lock;
+ new_lock = per_cpu(schedule_data, new_cpu).schedule_lock;
+
+ sched_spin_lock_double(lock, new_lock, &flags);
+ ASSERT(new_cpu != v->processor);
+ vcpu_move_locked(v, new_cpu);
+ sched_spin_unlock_double(lock, new_lock, flags);
+
+ sched_move_irqs(v);
+}
+
static void vcpu_migrate(struct vcpu *v)
{
unsigned long flags;
return;
}
- vcpu_move(v, old_cpu, new_cpu);
+ vcpu_move_locked(v, new_cpu);
sched_spin_unlock_double(old_lock, new_lock, flags);
struct vcpu *v;
struct cpupool *c;
cpumask_t online_affinity;
- int ret = 0;
+ unsigned int new_cpu;
+ int ret = 0;
c = per_cpu(cpupool, cpu);
if ( c == NULL )
cpumask_setall(v->cpu_hard_affinity);
}
- if ( v->processor == cpu )
+ if ( v->processor != cpu )
{
- set_bit(_VPF_migrating, &v->pause_flags);
+ /* The vcpu is not on this cpu, so we can move on. */
vcpu_schedule_unlock_irqrestore(lock, flags, v);
- vcpu_sleep_nosync(v);
- vcpu_migrate(v);
+ continue;
+ }
+
+ /* If it is on this cpu, we must send it away. */
+ if ( unlikely(system_state == SYS_STATE_suspend) )
+ {
+ vcpu_schedule_unlock_irqrestore(lock, flags, v);
+
+ /*
+ * If we are doing a shutdown/suspend, it is not necessary to
+ * ask the scheduler to chime in. In fact:
+ * * there is no reason for it: the end result we are after
+ * is just 'all the vcpus on the boot pcpu, and no vcpu
+ * anywhere else', so let's just go for it;
+ * * it's wrong, for cpupools with only non-boot pcpus, as
+ * the scheduler would always fail to send the vcpus away
+ * from the last online (non boot) pcpu!
+ *
+ * Therefore, in the shutdown/suspend case, we just pick up
+ * one (still) online pcpu. Note that, at this stage, all
+ * domains (including dom0) have been paused already, so we
+ * do not expect any vcpu activity at all.
+ */
+ cpumask_andnot(&online_affinity, &cpu_online_map,
+ cpumask_of(cpu));
+ BUG_ON(cpumask_empty(&online_affinity));
+ /*
+ * As boot cpu is, usually, pcpu #0, using cpumask_first()
+ * will make us converge quicker.
+ */
+ new_cpu = cpumask_first(&online_affinity);
+ vcpu_move_nosched(v, new_cpu);
}
else
+ {
+ /*
+ * OTOH, if the system is still live, and we are here because
+ * we are doing some cpupool manipulations:
+ * we want to call the scheduler, and let it re-evaluate
+ * the placement of the vcpu, taking into account the new
+ * cpupool configuration;
+ * the scheduler will always find a suitable solution, or
+ * things would have failed before getting in here.
+ */
+ set_bit(_VPF_migrating, &v->pause_flags);
vcpu_schedule_unlock_irqrestore(lock, flags, v);
+ vcpu_sleep_nosync(v);
+ vcpu_migrate(v);
- /*
- * A vcpu active in the hypervisor will not be migratable.
- * The caller should try again after releasing and reaquiring
- * all locks.
- */
- if ( v->processor == cpu )
- ret = -EAGAIN;
+ /*
+ * The only caveat, in this case, is that a vcpu active in
+ * the hypervisor isn't migratable. If so, the caller
+ * should try again after releasing and reacquiring all locks.
+ */
+ if ( v->processor == cpu )
+ ret = -EAGAIN;
+ }
}
-
domain_update_node_affinity(d);
}