if ( (c->n_dom > 0) && (cpumask_weight(c->cpu_valid) == 1) &&
(cpu != cpupool_moving_cpu) )
{
- for_each_domain(d)
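+        /*
+         * Any domain still in the pool must be dying; move it to cpupool0,
+         * otherwise fail with -EBUSY.
+         */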
+ rcu_read_lock(&domlist_read_lock);
+ for_each_domain_in_cpupool(d, c)
{
- if ( d->cpupool != c )
- continue;
if ( !d->is_dying )
{
                ret = -EBUSY;
                break;
            }
            c->n_dom--;
            ret = sched_move_domain(d, cpupool0);
            if ( ret )
            {
                c->n_dom++;
                break;
            }
            cpupool0->n_dom++;
}
+ rcu_read_unlock(&domlist_read_lock);
if ( ret )
goto out;
}

    rcu_read_lock(&domlist_read_lock);
for_each_domain ( d )
{
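+        /*
+         * Skip domains whose cpupool is served by a different scheduler
+         * instance (no cpupool means the default sedf scheduler).
+         */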
+ if ( (d->cpupool ? d->cpupool->sched : &sched_sedf_def) != ops )
+ continue;
for_each_vcpu(d, ed)
{
if ( !__task_on_queue(ed) && (ed->processor == i) )

    /* Sum across all weights. */
rcu_read_lock(&domlist_read_lock);
- for_each_domain( d )
+ for_each_domain_in_cpupool( d, c )
{
- if ( c != d->cpupool )
- continue;
for_each_vcpu( d, p )
{
if ( (cpu = p->processor) >= nr_cpus )

    /* Adjust all slices (and periods) to the new weight. */
rcu_read_lock(&domlist_read_lock);
- for_each_domain( d )
+ for_each_domain_in_cpupool( d, c )
{
for_each_vcpu ( d, p )
{

extern struct domain *domain_list;
/* Caller must hold the domlist_read_lock or domlist_update_lock. */
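+/*
+ * first_domain_in_cpupool() and next_domain_in_cpupool() walk the global
+ * domain list and return the first/next domain whose d->cpupool matches c,
+ * or NULL if no (further) domain is assigned to that pool. They back the
+ * for_each_domain_in_cpupool() iterator below and need the same RCU
+ * protection as for_each_domain().
+ */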
+static inline struct domain *first_domain_in_cpupool( struct cpupool *c)
+{
+ struct domain *d;
+ for (d = rcu_dereference(domain_list); d && d->cpupool != c;
+ d = rcu_dereference(d->next_in_list));
+ return d;
+}
+static inline struct domain *next_domain_in_cpupool(
+ struct domain *d, struct cpupool *c)
+{
+ for (d = rcu_dereference(d->next_in_list); d && d->cpupool != c;
+ d = rcu_dereference(d->next_in_list));
+ return d;
+}
+
#define for_each_domain(_d) \
for ( (_d) = rcu_dereference(domain_list); \
(_d) != NULL; \
(_d) = rcu_dereference((_d)->next_in_list )) \
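
+/* As for_each_domain(), but visiting only domains assigned to cpupool _c. */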
+#define for_each_domain_in_cpupool(_d,_c) \
+ for ( (_d) = first_domain_in_cpupool(_c); \
+ (_d) != NULL; \
+ (_d) = next_domain_in_cpupool((_d), (_c)))
+
#define for_each_vcpu(_d,_v) \
for ( (_v) = (_d)->vcpu ? (_d)->vcpu[0] : NULL; \
(_v) != NULL; \