It is ok to use just cpumask_scratch in csched_runq_steal().
In fact, the cpu parameter comes from the cpu local variable
in csched_load_balance(), which in turn comes from cpu in
csched_schedule(), which is smp_processor_id(). Since cpu is
always the processor we are running on, cpumask_scratch_cpu(cpu)
and cpumask_scratch are one and the same mask.
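
For reference, this is, in a nutshell, how cpu travels down to
csched_runq_steal() (abbreviated from sched_credit.c, with most
arguments and unrelated logic elided):

    static struct task_slice csched_schedule(...)
    {
        const int cpu = smp_processor_id();
        ...
        /* cpu is the current processor here... */
        snext = csched_load_balance(prv, cpu, snext, ...);
        ...
    }

    static struct csched_vcpu *
    csched_load_balance(struct csched_private *prv, int cpu, ...)
    {
        ...
        /* ...and is forwarded unchanged to the stealing logic. */
        speer = csched_runq_steal(peer_cpu, cpu, ...);
        ...
    }

So, within csched_runq_steal(), cpumask_scratch_cpu(cpu) is
&per_cpu(cpumask_scratch, smp_processor_id()), i.e., exactly
what the cpumask_scratch macro expands to.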
While there, also:
- move the comment about cpumask_scratch to the header
  where the scratch space is declared;
- spell out more clearly (in that same comment) what the
  serialization rules are.
No functional change intended.
Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
                  && !__vcpu_has_soft_affinity(vc, vc->cpu_hard_affinity) )
                 continue;
-            csched_balance_cpumask(vc, balance_step, cpumask_scratch_cpu(cpu));
-            if ( __csched_vcpu_is_migrateable(vc, cpu,
-                                              cpumask_scratch_cpu(cpu)) )
+            csched_balance_cpumask(vc, balance_step, cpumask_scratch);
+            if ( __csched_vcpu_is_migrateable(vc, cpu, cpumask_scratch) )
             {
                 /* We got a candidate. Grab it! */
                 TRACE_3D(TRC_CSCHED_STOLEN_VCPU, peer_cpu,
 DEFINE_PER_CPU(struct schedule_data, schedule_data);
 DEFINE_PER_CPU(struct scheduler *, scheduler);
-/*
- * Scratch space, for avoiding having too many cpumask_var_t on the stack.
- * Properly serializing access, if necessary, is responsibility of each
- * scheduler (typically, one can expect this to be protected by the per pCPU
- * or per runqueue lock).
- */
+/* Scratch space for cpumasks. */
 DEFINE_PER_CPU(cpumask_t, cpumask_scratch);
 extern const struct scheduler *__start_schedulers_array[], *__end_schedulers_array[];
 DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);
+/*
+ * Scratch space, for avoiding having too many cpumask_t on the stack.
+ * Within each scheduler, when using the scratch mask of one pCPU:
+ * - the pCPU must belong to the scheduler,
+ * - the caller must own the per-pCPU scheduler lock (a.k.a. runqueue
+ *   lock).
+ */
 DECLARE_PER_CPU(cpumask_t, cpumask_scratch);
 #define cpumask_scratch        (&this_cpu(cpumask_scratch))
 #define cpumask_scratch_cpu(c) (&per_cpu(cpumask_scratch, c))
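
To illustrate the rules in the new comment, this is the pattern a
scheduler is expected to follow when borrowing the scratch mask of
a pCPU. It is a hypothetical example, not part of this patch; it
assumes the usual pcpu_schedule_lock_irq() / pcpu_schedule_unlock_irq()
helpers from sched-if.h:

    /* cpu belongs to this scheduler; take its runqueue lock first. */
    spinlock_t *lock = pcpu_schedule_lock_irq(cpu);

    /* The scratch mask of cpu can now be used safely... */
    cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
                cpupool_domain_cpumask(v->domain));

    /* ...but only until the lock is released. */
    pcpu_schedule_unlock_irq(lock, cpu);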