#include <xen/trace.h>
+/*
+ * Locking:
+ * - Scheduler-lock (a.k.a. runqueue lock):
+ *  + is per-runqueue, and there is one runqueue per CPU;
+ *  + serializes all runqueue manipulation operations;
+ * - Private data lock (a.k.a. private scheduler lock):
+ *  + serializes accesses to the scheduler global state (weight,
+ *    credit, balance_credit, etc);
+ *  + serializes updates to the domains' scheduling parameters.
+ *
+ * Ordering is "private lock always comes first":
+ *  + if we need both locks, we must acquire the private
+ *    scheduler lock first;
+ *  + if we already own a runqueue lock, we must never acquire
+ *    the private scheduler lock.
+ */
+
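For illustration (not part of the patch itself), this is the acquisition pattern the ordering rule above mandates whenever both locks are needed; it mirrors what the dump routines below do, with prv being the scheduler's private data:

    /* Minimal sketch of "private lock first"; release in reverse order. */
    unsigned long flags;
    spinlock_t *lock;

    spin_lock_irqsave(&prv->lock, flags); /* 1) private scheduler lock */
    lock = pcpu_schedule_lock(cpu);       /* 2) this cpu's runqueue lock */

    /* ... safe to read global state and walk this cpu's runqueue ... */

    pcpu_schedule_unlock(lock, cpu);      /* runqueue lock goes first... */
    spin_unlock_irqrestore(&prv->lock, flags); /* ...private lock last */

Any path taking the two locks in the opposite order would open an AB/BA deadlock window against these routines.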
/*
* Basic constants
*/
csched_dump_pcpu(const struct scheduler *ops, int cpu)
{
struct list_head *runq, *iter;
+ struct csched_private *prv = CSCHED_PRIV(ops);
struct csched_pcpu *spc;
struct csched_vcpu *svc;
+ spinlock_t *lock;
+ unsigned long flags;
int loop;
#define cpustr keyhandler_scratch
+ /*
+ * We need both locks:
+ * - csched_dump_vcpu() wants to access domains' scheduling
+ * parameters, which are protected by the private scheduler lock;
+ * - we scan through the runqueue, so we need the proper runqueue
+ * lock (the one for this cpu's runqueue).
+ */
+ spin_lock_irqsave(&prv->lock, flags);
+ lock = pcpu_schedule_lock(cpu);
+
spc = CSCHED_PCPU(cpu);
runq = &spc->runq;
csched_dump_vcpu(svc);
}
}
+
+ pcpu_schedule_unlock(lock, cpu);
+ spin_unlock_irqrestore(&prv->lock, flags);
#undef cpustr
}
int loop;
unsigned long flags;
- spin_lock_irqsave(&(prv->lock), flags);
+ spin_lock_irqsave(&prv->lock, flags);
#define idlers_buf keyhandler_scratch
list_for_each( iter_svc, &sdom->active_vcpu )
{
struct csched_vcpu *svc;
+ spinlock_t *lock;
+
svc = list_entry(iter_svc, struct csched_vcpu, active_vcpu_elem);
+ lock = vcpu_schedule_lock(svc->vcpu);
printk("\t%3d: ", ++loop);
csched_dump_vcpu(svc);
+
+ vcpu_schedule_unlock(lock, svc->vcpu);
}
}
#undef idlers_buf
- spin_unlock_irqrestore(&(prv->lock), flags);
+ spin_unlock_irqrestore(&prv->lock, flags);
}
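A note on the per-vCPU locking in the loop above (the credit2 and sedf hunks below follow the same pattern): the scheduler lock covering a vCPU can change while we wait for it, e.g. if the vCPU moves to another runqueue, so the code goes through vcpu_schedule_lock() rather than caching a spinlock_t pointer up front. A simplified sketch of the lock/re-check idiom Xen's sched-if.h helpers are built around (reconstructed here, so treat the details as an assumption):

    /* Sketch of the idiom behind vcpu_schedule_lock(); the real
     * implementation is generated by the sched_lock() macros in
     * xen/include/xen/sched-if.h. */
    static spinlock_t *vcpu_lock_sketch(struct vcpu *v)
    {
        for ( ; ; )
        {
            spinlock_t *lock = per_cpu(schedule_data, v->processor).schedule_lock;

            spin_lock(lock);
            /* If v was not migrated while we were spinning, this is
             * still the right lock and we can hand it to the caller. */
            if ( likely(lock == per_cpu(schedule_data, v->processor).schedule_lock) )
                return lock;
            /* Otherwise, drop it and retry against the new lock. */
            spin_unlock(lock);
        }
    }

This is also why the unlock side (vcpu_schedule_unlock()) takes the pointer that was actually acquired, rather than re-deriving it.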
static int
* credit2 wiki page:
* http://wiki.xen.org/wiki/Credit2_Scheduler_Development
* TODO:
- * + Immediate bug-fixes
- * - Do per-runqueue, grab proper lock for dump debugkey
* + Multiple sockets
* - Detect cpu layout and make runqueue map, one per L2 (make_runq_map())
* - Simple load balancer / runqueue assignment
static void
csched2_dump_pcpu(const struct scheduler *ops, int cpu)
{
+ struct csched2_private *prv = CSCHED2_PRIV(ops);
struct list_head *runq, *iter;
struct csched2_vcpu *svc;
+ unsigned long flags;
+ spinlock_t *lock;
int loop;
char cpustr[100];
- /* FIXME: Do locking properly for access to runqueue structures */
+ /*
+ * We need both locks:
+ * - csched2_dump_vcpu() wants to access domains' scheduling
+ * parameters, which are protected by the private scheduler lock;
+ * - we scan through the runqueue, so we need the proper runqueue
+ * lock (the one for the runqueue this cpu is associated with).
+ */
+ spin_lock_irqsave(&prv->lock, flags);
+ lock = per_cpu(schedule_data, cpu).schedule_lock;
+ spin_lock(lock);
runq = &RQD(ops, cpu)->runq;
csched2_dump_vcpu(svc);
}
}
+
+ spin_unlock(lock);
+ spin_unlock_irqrestore(&prv->lock, flags);
}
static void
{
struct list_head *iter_sdom, *iter_svc;
struct csched2_private *prv = CSCHED2_PRIV(ops);
+ unsigned long flags;
int i, loop;
+ /*
+ * We need the private scheduler lock, as we access both the global
+ * scheduler data and (below) the list of active domains.
+ */
+ spin_lock_irqsave(&prv->lock, flags);
+
printk("Active queues: %d\n"
"\tdefault-weight = %d\n",
cpumask_weight(&prv->active_queues),
fraction);
}
- /* FIXME: Locking! */
printk("Domain info:\n");
loop = 0;
struct csched2_dom *sdom;
sdom = list_entry(iter_sdom, struct csched2_dom, sdom_elem);
- printk("\tDomain: %d w %d v %d\n\t",
- sdom->dom->domain_id,
- sdom->weight,
- sdom->nr_vcpus);
+ printk("\tDomain: %d w %d v %d\n\t",
+ sdom->dom->domain_id,
+ sdom->weight,
+ sdom->nr_vcpus);
list_for_each( iter_svc, &sdom->vcpu )
{
struct csched2_vcpu *svc;
+ spinlock_t *lock;
+
svc = list_entry(iter_svc, struct csched2_vcpu, sdom_elem);
+ lock = vcpu_schedule_lock(svc->vcpu);
printk("\t%3d: ", ++loop);
csched2_dump_vcpu(svc);
+
+ vcpu_schedule_unlock(lock, svc->vcpu);
}
}
+
+ spin_unlock_irqrestore(&prv->lock, flags);
}
static void activate_runqueue(struct csched2_private *prv, int rqi)
static void
rt_dump_pcpu(const struct scheduler *ops, int cpu)
{
- struct rt_vcpu *svc = rt_vcpu(curr_on_cpu(cpu));
+ struct rt_private *prv = rt_priv(ops);
+ unsigned long flags;
- rt_dump_vcpu(ops, svc);
+ spin_lock_irqsave(&prv->lock, flags);
+ rt_dump_vcpu(ops, rt_vcpu(curr_on_cpu(cpu)));
+ spin_unlock_irqrestore(&prv->lock, flags);
}
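Note that rt_dump_pcpu() only takes the private lock: RTDS keeps a single, global runqueue which, unlike credit and credit2, is serialized by prv->lock itself, so there is no separate per-runqueue lock to nest here.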
static void
/* Dumps all domains on the specified cpu */
static void sedf_dump_cpu_state(const struct scheduler *ops, int i)
{
+ struct sedf_priv_info *prv = SEDF_PRIV(ops);
struct list_head *list, *queue, *tmp;
struct sedf_vcpu_info *d_inf;
struct domain *d;
struct vcpu *ed;
+ spinlock_t *lock;
+ unsigned long flags;
int loop = 0;
+ /*
+ * We need both locks, as:
+ * - we access domains' parameters, which are protected by the
+ * private scheduler lock;
+ * - we scan through the various queues, so we need the proper
+ * runqueue lock (i.e., the one for this pCPU).
+ */
+ spin_lock_irqsave(&prv->lock, flags);
+ lock = pcpu_schedule_lock(i);
+
printk("now=%"PRIu64"\n",NOW());
queue = RUNQ(i);
printk("RUNQ rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
}
}
rcu_read_unlock(&domlist_read_lock);
+
+ pcpu_schedule_unlock(lock, i);
+ spin_unlock_irqrestore(&prv->lock, flags);
}
struct scheduler *sched;
cpumask_t *cpus;
+ /* Locking, if necessary, must be handled within each scheduler; see the note below. */
+
sched = (c == NULL) ? &ops : c->sched;
cpus = cpupool_scheduler_cpumask(c);
printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
for_each_cpu (i, cpus)
{
- spinlock_t *lock = pcpu_schedule_lock(i);
-
printk("CPU[%02d] ", i);
SCHED_OP(sched, dump_cpu_state, i);
- pcpu_schedule_unlock(lock, i);
}
}
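Finally, on why the generic layer must not wrap dump_cpu_state in the pcpu lock any more (the lines removed just above): with the schedulers now taking their private lock inside their dump handlers, keeping the outer pcpu lock would invert the documented ordering. A hypothetical interleaving, shown as a comment rather than code from the patch:

    /*
     * Hypothetical AB/BA deadlock with the old generic code:
     *
     *   CPU A: generic dump path         CPU B: e.g., parameter update
     *   pcpu_schedule_lock(i);           spin_lock_irqsave(&prv->lock, ..);
     *   SCHED_OP(.., dump_cpu_state, i)  ...
     *     -> spin_lock(&prv->lock);        -> pcpu_schedule_lock(i);
     *
     * A waits for prv->lock (held by B), while B waits for cpu i's
     * runqueue lock (held by A): neither can make progress.
     */

Delegating all locking to the schedulers lets every path respect "private lock always comes first".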