csched_alloc_pdata(const struct scheduler *ops, int cpu)
{
struct csched_pcpu *spc;
- struct csched_private *prv = CSCHED_PRIV(ops);
- unsigned long flags;
/* Allocate per-PCPU info */
spc = xzalloc(struct csched_pcpu);
if ( spc == NULL )
    return ERR_PTR(-ENOMEM);
+ return spc;
+}
+
+static void
+csched_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
+{
+ struct csched_private *prv = CSCHED_PRIV(ops);
+ struct csched_pcpu * const spc = pdata;
+ unsigned long flags;
+
+ /* cpu data needs to be allocated, but STILL uninitialized */
+ ASSERT(spc && spc->runq.next == NULL && spc->runq.prev == NULL);
+
spin_lock_irqsave(&prv->lock, flags);
/* Initialize/update system-wide config */
INIT_LIST_HEAD(&spc->runq);
spc->runq_sort_last = prv->runq_sort;
spc->idle_bias = nr_cpu_ids - 1;
- if ( per_cpu(schedule_data, cpu).sched_priv == NULL )
- per_cpu(schedule_data, cpu).sched_priv = spc;
/* Start off idling... */
BUG_ON(!is_idle_vcpu(curr_on_cpu(cpu)));
cpumask_set_cpu(cpu, prv->idlers);
spin_unlock_irqrestore(&prv->lock, flags);
-
- return spc;
}
#ifndef NDEBUG
.alloc_vdata = csched_alloc_vdata,
.free_vdata = csched_free_vdata,
.alloc_pdata = csched_alloc_pdata,
+ .init_pdata = csched_init_pdata,
.free_pdata = csched_free_pdata,
.alloc_domdata = csched_alloc_domdata,
.free_domdata = csched_free_domdata,
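
The Credit1 hunks above split what used to be a single alloc_pdata into an allocation step (xzalloc(), which may fail and returns ERR_PTR(-ENOMEM)) and an initialization step that runs under prv->lock and can rely on the memory still being zeroed, hence the ASSERT on the still-NULL runq pointers. As a minimal, purely illustrative sketch (not part of the patch), a caller in the common scheduler code could drive the two hooks roughly as below; the helper name sched_setup_cpu() is hypothetical, while IS_ERR()/PTR_ERR() and per_cpu(schedule_data, cpu).sched_priv are taken from the surrounding code:

/*
 * Hypothetical sketch, not part of this patch: how a caller might
 * drive the two hooks, assuming alloc_pdata may fail and init_pdata
 * cannot.  sched_setup_cpu() is an invented name.
 */
static int sched_setup_cpu(const struct scheduler *ops, int cpu)
{
    void *pdata = NULL;

    if ( ops->alloc_pdata )
    {
        pdata = ops->alloc_pdata(ops, cpu);
        if ( IS_ERR(pdata) )
            return PTR_ERR(pdata);
    }

    /* Make the per-cpu data reachable before initializing it. */
    per_cpu(schedule_data, cpu).sched_priv = pdata;

    if ( ops->init_pdata )
        ops->init_pdata(ops, pdata, cpu);

    return 0;
}

Keeping the fallible allocation apart from the work done under the scheduler lock is what lets init_pdata use ASSERT() instead of runtime error handling.
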
cpumask_clear_cpu(rqi, &prv->active_queues);
}
-static void init_pcpu(const struct scheduler *ops, int cpu)
+static void
+csched2_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
{
unsigned rqi;
unsigned long flags;
spin_lock_irqsave(&prv->lock, flags);
- if ( cpumask_test_cpu(cpu, &prv->initialized) )
- {
- printk("%s: Strange, cpu %d already initialized!\n", __func__, cpu);
- spin_unlock_irqrestore(&prv->lock, flags);
- return;
- }
+ ASSERT(!cpumask_test_cpu(cpu, &prv->initialized));
/* Figure out which runqueue to put it in */
rqi = 0;
return;
}
-static void *
-csched2_alloc_pdata(const struct scheduler *ops, int cpu)
-{
- /* Check to see if the cpu is online yet */
- /* Note: cpu 0 doesn't get a STARTING callback */
- if ( cpu == 0 || cpu_to_socket(cpu) != XEN_INVALID_SOCKET_ID )
- init_pcpu(ops, cpu);
- else
- printk("%s: cpu %d not online yet, deferring initializatgion\n",
- __func__, cpu);
-
- return NULL;
-}
-
static void
csched2_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
{
spin_lock_irqsave(&prv->lock, flags);
- BUG_ON(!cpumask_test_cpu(cpu, &prv->initialized));
+ ASSERT(cpumask_test_cpu(cpu, &prv->initialized));
/* Find the old runqueue and remove this cpu from it */
rqi = prv->runq_map[cpu];
return;
}
-static int
-csched2_cpu_starting(int cpu)
-{
- struct scheduler *ops;
-
- /* Hope this is safe from cpupools switching things around. :-) */
- ops = per_cpu(scheduler, cpu);
-
- if ( ops->alloc_pdata == csched2_alloc_pdata )
- init_pcpu(ops, cpu);
-
- return NOTIFY_DONE;
-}
-
-static int cpu_credit2_callback(
- struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- int rc = 0;
-
- switch ( action )
- {
- case CPU_STARTING:
- csched2_cpu_starting(cpu);
- break;
- default:
- break;
- }
-
- return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
-}
-
-static struct notifier_block cpu_credit2_nfb = {
- .notifier_call = cpu_credit2_callback
-};
-
-static int
-csched2_global_init(void)
-{
- register_cpu_notifier(&cpu_credit2_nfb);
- return 0;
-}
-
static int
csched2_init(struct scheduler *ops)
{
.dump_cpu_state = csched2_dump_pcpu,
.dump_settings = csched2_dump,
- .global_init = csched2_global_init,
.init = csched2_init,
.deinit = csched2_deinit,
.alloc_vdata = csched2_alloc_vdata,
.free_vdata = csched2_free_vdata,
- .alloc_pdata = csched2_alloc_pdata,
+ .init_pdata = csched2_init_pdata,
.free_pdata = csched2_free_pdata,
.alloc_domdata = csched2_alloc_domdata,
.free_domdata = csched2_free_domdata,
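
For Credit2, the dedicated init_pdata hook is what allows dropping the CPU_STARTING notifier plumbing: initialization no longer has to be deferred and re-triggered from a callback, so the old "Strange, cpu %d already initialized!" recovery path becomes a plain ASSERT. A rough, hypothetical sketch of the shape of such a body, using only the fields visible in these hunks (prv->lock, prv->initialized, prv->runq_map) and omitting the real runqueue selection and activation logic:

/* Hypothetical sketch, not the patch: runqueue choice and activation omitted. */
static void csched2_init_pdata_sketch(struct csched2_private *prv, int cpu)
{
    unsigned int rqi;
    unsigned long flags;

    spin_lock_irqsave(&prv->lock, flags);

    /* With a well-defined call site, double initialization is a bug,
     * not an event to be tolerated and logged. */
    ASSERT(!cpumask_test_cpu(cpu, &prv->initialized));

    /* Figure out which runqueue to put the cpu in (placeholder choice). */
    rqi = 0;
    prv->runq_map[cpu] = rqi;
    cpumask_set_cpu(cpu, &prv->initialized);

    spin_unlock_irqrestore(&prv->lock, flags);
}
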
* Point per_cpu spinlock to the global system lock;
* All cpu have same global system lock
*/
-static void *
-rt_alloc_pdata(const struct scheduler *ops, int cpu)
+static void
+rt_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
{
struct rt_private *prv = rt_priv(ops);
spinlock_t *old_lock;
/* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
spin_unlock_irqrestore(old_lock, flags);
+}
+
+static void *
+rt_alloc_pdata(const struct scheduler *ops, int cpu)
+{
+ struct rt_private *prv = rt_priv(ops);
if ( !alloc_cpumask_var(&_cpumask_scratch[cpu]) )
return ERR_PTR(-ENOMEM);
.deinit = rt_deinit,
.alloc_pdata = rt_alloc_pdata,
.free_pdata = rt_free_pdata,
+ .init_pdata = rt_init_pdata,
.alloc_domdata = rt_alloc_domdata,
.free_domdata = rt_free_domdata,
.init_domain = rt_dom_init,
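
RTDS gets the same split, with the extra subtlety that rt_init_pdata is where the per-CPU scheduler lock gets re-pointed at the single global runqueue lock, which is why the code must drop the old lock with spin_unlock_irqrestore() rather than pcpu_schedule_unlock(). A minimal sketch of that idiom, assuming the helpers visible in the hunk (pcpu_schedule_lock_irqsave(), per_cpu(schedule_data, cpu).schedule_lock); the function name is illustrative only:

/* Illustrative sketch of the lock re-pointing done by an init_pdata-style hook. */
static void rt_init_pdata_sketch(struct rt_private *prv, int cpu)
{
    spinlock_t *old_lock;
    unsigned long flags;

    /* Take whatever lock currently serializes this pcpu's scheduling data. */
    old_lock = pcpu_schedule_lock_irqsave(cpu, &flags);

    /* From now on, this pcpu is covered by the global RTDS runqueue lock. */
    per_cpu(schedule_data, cpu).schedule_lock = &prv->lock;

    /* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
    spin_unlock_irqrestore(old_lock, flags);
}
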