From: Keir Fraser
Date: Mon, 17 May 2010 17:52:01 +0000 (+0100)
Subject: cpupool: Fix CPU hotplug after recent changes.
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~12136
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=2dfb040b49b03bfd89dd9c1546ced454096f18f7;p=xen.git

cpupool: Fix CPU hotplug after recent changes.

Signed-off-by: Keir Fraser
---

diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 22cea30cfd..6140820a24 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1019,14 +1019,14 @@ void __init __start_xen(unsigned long mbi_p)
 
     xsm_init(&initrdidx, mbi, initial_images_start);
 
+    timer_init();
+
     init_idle_domain();
 
     trap_init();
 
     rcu_init();
 
-    timer_init();
-
     early_time_init();
 
     arch_init_memory();
diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index e8301f466c..8c11f9efb1 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -27,9 +27,6 @@
 cpumask_t cpupool_free_cpus; /* cpus not in any cpupool */
 static struct cpupool *cpupool_list; /* linked list, sorted by poolid */
 
-static int cpupool0_max_cpus;
-integer_param("pool0_max_cpus", cpupool0_max_cpus);
-
 static int cpupool_moving_cpu = -1;
 static struct cpupool *cpupool_cpu_moving = NULL;
 static cpumask_t cpupool_locked_cpus = CPU_MASK_NONE;
@@ -110,7 +107,7 @@ struct cpupool *cpupool_create(int poolid, char *sched)
     }
     *q = c;
     c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
-    if ( schedule_init_global(sched, &(c->sched)) )
+    if ( (c->sched = scheduler_alloc(sched)) == NULL )
     {
         spin_unlock(&cpupool_lock);
         cpupool_destroy(c);
@@ -119,7 +116,7 @@ struct cpupool *cpupool_create(int poolid, char *sched)
     }
     spin_unlock(&cpupool_lock);
     printk("Created cpupool %d with scheduler %s (%s)\n", c->cpupool_id,
-           c->sched.name, c->sched.opt_name);
+           c->sched->name, c->sched->opt_name);
 
     return c;
 }
@@ -147,7 +144,7 @@ int cpupool_destroy(struct cpupool *c)
     *q = c->next;
     spin_unlock(&cpupool_lock);
     printk(XENLOG_DEBUG "cpupool_destroy(pool=%d)\n", c->cpupool_id);
-    schedule_deinit_global(&(c->sched));
+    scheduler_free(c->sched);
     free_cpupool_struct(c);
     return 0;
 }
@@ -172,29 +169,6 @@ static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
     return 0;
 }
 
-/*
- * assign free physical cpus to a cpupool
- * cpus assigned are unused cpus with lowest possible ids
- * returns the number of cpus assigned
- */
-int cpupool_assign_ncpu(struct cpupool *c, int ncpu)
-{
-    int i, n = 0;
-
-    spin_lock(&cpupool_lock);
-    for_each_cpu_mask(i, cpupool_free_cpus)
-    {
-        if ( cpupool_assign_cpu_locked(c, i) == 0 )
-            n++;
-        if ( n == ncpu )
-            break;
-    }
-    spin_unlock(&cpupool_lock);
-    printk(XENLOG_DEBUG "cpupool_assign_ncpu(pool=%d,ncpu=%d) rc %d\n",
-        c->cpupool_id, ncpu, n);
-    return n;
-}
-
 static long cpupool_unassign_cpu_helper(void *info)
 {
     struct cpupool *c = (struct cpupool *)info;
@@ -352,8 +326,7 @@ static void cpupool_cpu_add(unsigned int cpu)
     spin_lock(&cpupool_lock);
     cpu_clear(cpu, cpupool_locked_cpus);
     cpu_set(cpu, cpupool_free_cpus);
-    if ( cpupool0 != NULL )
-        cpupool_assign_cpu_locked(cpupool0, cpu);
+    cpupool_assign_cpu_locked(cpupool0, cpu);
     spin_unlock(&cpupool_lock);
 }
 
@@ -426,7 +399,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
         if ( c == NULL )
            break;
        op->cpupool_id = c->cpupool_id;
-       op->sched_id = c->sched.sched_id;
+       op->sched_id = c->sched->sched_id;
        op->n_dom = c->n_dom;
        ret = cpumask_to_xenctl_cpumap(&(op->cpumap), &(c->cpu_valid));
     }
@@ -599,27 +572,14 @@ static struct notifier_block cpu_nfb = {
 static int __init cpupool_presmp_init(void)
 {
     void *cpu = (void *)(long)smp_processor_id();
+    cpupool0 = cpupool_create(0, NULL);
+    BUG_ON(cpupool0 == NULL);
     cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
     register_cpu_notifier(&cpu_nfb);
     return 0;
 }
 presmp_initcall(cpupool_presmp_init);
 
-static int __init cpupool_init(void)
-{
-    cpupool0 = cpupool_create(0, NULL);
-    BUG_ON(cpupool0 == NULL);
-
-    if ( (cpupool0_max_cpus == 0) || (cpupool0_max_cpus > num_online_cpus()) )
-        cpupool0_max_cpus = num_online_cpus();
-
-    if ( !cpupool_assign_ncpu(cpupool0, cpupool0_max_cpus) )
-        BUG();
-
-    return 0;
-}
-__initcall(cpupool_init);
-
 /*
  * Local variables:
  * mode: C
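With the cpupool.c hunks above, cpupool0 is created in cpupool_presmp_init() and every CPU that comes online is handed to it by the CPU_ONLINE notifier path (cpupool_cpu_add), so the pool0_max_cpus option and cpupool_assign_ncpu() disappear. The following is a minimal, self-contained sketch of that notifier-driven assignment pattern; the names and types (struct pool, pool_assign_cpu, cpu_online_notifier) are simplified stand-ins, not Xen's internals.

    /* Simplified model: every CPU that comes online is handed to the
     * default pool by the notifier, so no separate "assign N CPUs at
     * boot" step is needed.  Illustrative names only. */
    #include <stdio.h>

    #define NR_CPUS 8

    struct pool {
        int id;
        unsigned long cpu_valid;      /* bitmap of CPUs owned by this pool */
    };

    static struct pool pool0 = { .id = 0 };
    static unsigned long free_cpus;   /* CPUs not yet in any pool */

    /* Stand-in for cpupool_assign_cpu_locked(): move one CPU into a pool. */
    static void pool_assign_cpu(struct pool *p, unsigned int cpu)
    {
        free_cpus &= ~(1UL << cpu);
        p->cpu_valid |= 1UL << cpu;
    }

    /* Stand-in for the CPU_ONLINE notifier calling cpupool_cpu_add(). */
    static void cpu_online_notifier(unsigned int cpu)
    {
        free_cpus |= 1UL << cpu;
        pool_assign_cpu(&pool0, cpu); /* pool0 already exists at this point */
    }

    int main(void)
    {
        /* pool0 is created before any secondary CPU comes up (presmp init),
         * so the notifier can assign unconditionally. */
        for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
            cpu_online_notifier(cpu);

        printf("pool%d owns cpus: %#lx\n", pool0.id, pool0.cpu_valid);
        return 0;
    }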
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 6dbe7de4ae..6dff698a64 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -169,15 +169,8 @@ struct csched_private {
     uint32_t credit;
     int credit_balance;
     uint32_t runq_sort;
-    int ticker_active;
 };
 
-
-/*
- * Global variables
- */
-static struct csched_private *csched_priv0 = NULL;
-
 static void csched_tick(void *_cpu);
 static void csched_acct(void *dummy);
 
@@ -351,17 +344,16 @@ csched_alloc_pdata(const struct scheduler *ops, int cpu)
     prv->credit += CSCHED_CREDITS_PER_ACCT;
     prv->ncpus++;
     cpu_set(cpu, prv->cpus);
-    if ( (prv->ncpus == 1) && (prv != csched_priv0) )
+    if ( prv->ncpus == 1 )
     {
         prv->master = cpu;
-        init_timer( &prv->master_ticker, csched_acct, prv, cpu);
-        prv->ticker_active = 2;
+        init_timer(&prv->master_ticker, csched_acct, prv, cpu);
+        set_timer(&prv->master_ticker, NOW() +
+                  MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT);
     }
 
     init_timer(&spc->ticker, csched_tick, (void *)(unsigned long)cpu, cpu);
-
-    if ( prv == csched_priv0 )
-        prv->master = first_cpu(prv->cpus);
+    set_timer(&spc->ticker, NOW() + MILLISECS(CSCHED_MSECS_PER_TICK));
 
     INIT_LIST_HEAD(&spc->runq);
     spc->runq_sort_last = prv->runq_sort;
@@ -1450,58 +1442,22 @@ csched_dump(const struct scheduler *ops)
 }
 
 static int
-csched_init(struct scheduler *ops, int pool0)
+csched_init(struct scheduler *ops)
 {
     struct csched_private *prv;
 
     prv = xmalloc(struct csched_private);
     if ( prv == NULL )
-        return 1;
+        return -ENOMEM;
+    memset(prv, 0, sizeof(*prv));
 
-    if ( pool0 )
-        csched_priv0 = prv;
     ops->sched_data = prv;
     spin_lock_init(&prv->lock);
     INIT_LIST_HEAD(&prv->active_sdom);
-    prv->ncpus = 0;
     prv->master = UINT_MAX;
-    cpus_clear(prv->idlers);
-    prv->weight = 0U;
-    prv->credit = 0U;
-    prv->credit_balance = 0;
-    prv->runq_sort = 0U;
-    prv->ticker_active = (csched_priv0 == prv) ? 0 : 1;
-
-    return 0;
-}
-
-/* Tickers cannot be kicked until SMP subsystem is alive. */
-static __init int csched_start_tickers(void)
-{
-    struct csched_pcpu *spc;
-    unsigned int cpu;
-
-    /* Is the credit scheduler initialised? */
-    if ( (csched_priv0 == NULL) || (csched_priv0->ncpus == 0) )
-        return 0;
-
-    csched_priv0->ticker_active = 1;
-
-    for_each_online_cpu ( cpu )
-    {
-        spc = CSCHED_PCPU(cpu);
-        set_timer(&spc->ticker, NOW() + MILLISECS(CSCHED_MSECS_PER_TICK));
-    }
-
-    init_timer( &csched_priv0->master_ticker, csched_acct, csched_priv0,
-                csched_priv0->master);
-
-    set_timer( &csched_priv0->master_ticker, NOW() +
-               MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT );
 
     return 0;
 }
-__initcall(csched_start_tickers);
 
 static void
 csched_deinit(const struct scheduler *ops)
@@ -1526,25 +1482,11 @@ static void csched_tick_resume(const struct scheduler *ops, unsigned int cpu)
 {
     struct csched_pcpu *spc;
     uint64_t now = NOW();
 
-    struct csched_private *prv;
-
-    prv = CSCHED_PRIV(ops);
-    if ( !prv->ticker_active )
-        return;
-
     spc = CSCHED_PCPU(cpu);
 
     set_timer(&spc->ticker, now + MILLISECS(CSCHED_MSECS_PER_TICK)
             - now % MILLISECS(CSCHED_MSECS_PER_TICK) );
-
-    if ( (prv->ticker_active == 2) && (prv->master == cpu) )
-    {
-        set_timer( &prv->master_ticker, now +
-                   MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT
-                   - now % MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT);
-        prv->ticker_active = 1;
-    }
 }
 
 static struct csched_private _csched_priv;
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 3baf7147e5..d756747a11 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -1134,7 +1134,7 @@ make_runq_map(struct csched_private *prv)
 }
 
 static int
-csched_init(struct scheduler *ops, int pool0)
+csched_init(struct scheduler *ops)
 {
     int i;
     struct csched_private *prv;
@@ -1145,7 +1145,7 @@ csched_init(struct scheduler *ops, int pool0)
 
     prv = xmalloc(struct csched_private);
     if ( prv == NULL )
-        return 1;
+        return -ENOMEM;
     memset(prv, 0, sizeof(*prv));
 
     ops->sched_data = prv;
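The credit-scheduler hunks above drop the csched_priv0 global and the deferred csched_start_tickers() initcall: each scheduler instance keeps all of its state in the private block hung off ops->sched_data (zeroed with memset, reporting -ENOMEM on allocation failure), and the per-CPU and accounting tickers are armed directly in csched_alloc_pdata(), which works because timer_init() now runs earlier in __start_xen(). The sketch below is a simplified, self-contained model of that per-instance layout; the names (scheduler_instance, credit_private, credit_init, credit_add_cpu) are illustrative stand-ins and the timer calls are only hinted at in comments.

    /* Simplified model: all credit-scheduler state lives in a private block
     * per scheduler instance, so several instances (one per pool) coexist
     * without any file-scope csched_priv0.  Illustrative names only. */
    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct scheduler_instance {
        void *sched_data;              /* like ops->sched_data */
    };

    struct credit_private {
        unsigned int ncpus;
        unsigned int master;           /* CPU that runs the accounting tick */
    };

    static int credit_init(struct scheduler_instance *ops)
    {
        struct credit_private *prv = malloc(sizeof(*prv));

        if (prv == NULL)
            return -ENOMEM;            /* error code rather than "1" */
        memset(prv, 0, sizeof(*prv));  /* replaces field-by-field zeroing */
        prv->master = UINT_MAX;
        ops->sched_data = prv;
        return 0;
    }

    /* First CPU added to an instance becomes its master; the real code
     * would arm the accounting timer right here instead of deferring it. */
    static void credit_add_cpu(struct scheduler_instance *ops, unsigned int cpu)
    {
        struct credit_private *prv = ops->sched_data;

        if (++prv->ncpus == 1)
            prv->master = cpu;         /* set_timer(&master_ticker, ...) here */
    }

    int main(void)
    {
        struct scheduler_instance pool0_ops, pool1_ops;

        if (credit_init(&pool0_ops) || credit_init(&pool1_ops))
            return 1;
        credit_add_cpu(&pool0_ops, 0);
        credit_add_cpu(&pool1_ops, 4);

        printf("masters: %u and %u\n",
               ((struct credit_private *)pool0_ops.sched_data)->master,
               ((struct credit_private *)pool1_ops.sched_data)->master);
        return 0;
    }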
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index a718dfa6b5..9ea3137ffb 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -72,7 +72,7 @@ static struct scheduler __read_mostly ops;
          (( (opsptr)->fn != NULL ) ? (opsptr)->fn(opsptr, ##__VA_ARGS__ ) \
           : (typeof((opsptr)->fn(opsptr, ##__VA_ARGS__)))0 )
 
-#define DOM2OP(_d) (((_d)->cpupool == NULL) ? &ops : &((_d)->cpupool->sched))
+#define DOM2OP(_d) (((_d)->cpupool == NULL) ? &ops : ((_d)->cpupool->sched))
 #define VCPU2OP(_v) (DOM2OP((_v)->domain))
 #define VCPU2ONLINE(_v) \
          (((_v)->domain->cpupool == NULL) ? &cpu_online_map \
@@ -243,21 +243,21 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
     void **vcpu_priv;
     void *domdata;
 
-    domdata = SCHED_OP(&(c->sched), alloc_domdata, d);
+    domdata = SCHED_OP(c->sched, alloc_domdata, d);
     if ( domdata == NULL )
         return -ENOMEM;
 
     vcpu_priv = xmalloc_array(void *, d->max_vcpus);
     if ( vcpu_priv == NULL )
     {
-        SCHED_OP(&(c->sched), free_domdata, domdata);
+        SCHED_OP(c->sched, free_domdata, domdata);
         return -ENOMEM;
     }
 
     memset(vcpu_priv, 0, d->max_vcpus * sizeof(void *));
     for_each_vcpu ( d, v )
     {
-        vcpu_priv[v->vcpu_id] = SCHED_OP(&(c->sched), alloc_vdata, v, domdata);
+        vcpu_priv[v->vcpu_id] = SCHED_OP(c->sched, alloc_vdata, v, domdata);
         if ( vcpu_priv[v->vcpu_id] == NULL )
         {
             for_each_vcpu ( d, v )
@@ -266,7 +266,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
                 xfree(vcpu_priv[v->vcpu_id]);
             }
             xfree(vcpu_priv);
-            SCHED_OP(&(c->sched), free_domdata, domdata);
+            SCHED_OP(c->sched, free_domdata, domdata);
             return -ENOMEM;
         }
     }
@@ -1133,7 +1133,7 @@ void __init scheduler_init(void)
         if ( strcmp(ops.opt_name, opt_sched) == 0 )
             break;
     }
-    
+
     if ( schedulers[i] == NULL )
     {
         printk("Could not find scheduler: %s\n", opt_sched);
@@ -1144,23 +1144,21 @@
     register_cpu_notifier(&cpu_nfb);
 
     printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
-    if ( SCHED_OP(&ops, init, 1) )
+    if ( SCHED_OP(&ops, init) )
         panic("scheduler returned error on init\n");
 }
 
-/* switch scheduler on cpu */
 void schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 {
     unsigned long flags;
     struct vcpu *v;
-    void *vpriv = NULL;
-    void *ppriv;
-    void *ppriv_old;
-    struct scheduler *old_ops;
-    struct scheduler *new_ops;
-
-    old_ops = per_cpu(scheduler, cpu);
-    new_ops = (c == NULL) ? &ops : &(c->sched);
+    void *ppriv, *ppriv_old, *vpriv = NULL;
+    struct scheduler *old_ops = per_cpu(scheduler, cpu);
+    struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
+
+    if ( old_ops == new_ops )
+        return;
+
     v = per_cpu(schedule_data, cpu).idle;
     ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
     if ( c != NULL )
@@ -1192,11 +1190,14 @@ void schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
 }
 
-/* init scheduler global data */
-int schedule_init_global(char *name, struct scheduler *sched)
+struct scheduler *scheduler_alloc(char *name)
 {
     int i;
     const struct scheduler *data;
+    struct scheduler *sched;
+
+    if ( name == NULL )
+        return &ops;
 
     data = &ops;
     for ( i = 0; (schedulers[i] != NULL) && (name != NULL) ; i++ )
@@ -1207,14 +1208,24 @@
             break;
         }
     }
+
+    if ( (sched = xmalloc(struct scheduler)) == NULL )
+        return NULL;
     memcpy(sched, data, sizeof(*sched));
-    return SCHED_OP(sched, init, 0);
+    if ( SCHED_OP(sched, init) != 0 )
+    {
+        xfree(sched);
+        sched = NULL;
+    }
+
+    return sched;
 }
 
-/* deinitialize scheduler global data */
-void schedule_deinit_global(struct scheduler *sched)
+void scheduler_free(struct scheduler *sched)
 {
+    BUG_ON(sched == &ops);
     SCHED_OP(sched, deinit);
+    xfree(sched);
 }
 
 void schedule_dump(struct cpupool *c)
@@ -1223,7 +1234,7 @@
     struct scheduler *sched;
     cpumask_t *cpus;
 
-    sched = (c == NULL) ? &ops : &(c->sched);
+    sched = (c == NULL) ? &ops : c->sched;
     cpus = (c == NULL) ? &cpupool_free_cpus : &c->cpu_valid;
     printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
     SCHED_OP(sched, dump_settings);
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index af21e95e8f..c6ed8ba1b9 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -89,7 +89,7 @@ struct scheduler {
     unsigned int sched_id;  /* ID for this scheduler */
     void *sched_data;       /* global data pointer */
 
-    int          (*init)           (struct scheduler *, int);
+    int          (*init)           (struct scheduler *);
     void         (*deinit)         (const struct scheduler *);
 
     void         (*free_vdata)     (const struct scheduler *, void *);
@@ -131,7 +131,7 @@ struct cpupool
     cpumask_t cpu_valid;   /* all cpus assigned to pool */
     struct cpupool *next;
     unsigned int n_dom;
-    struct scheduler sched;
+    struct scheduler *sched;
 };
 
 const struct scheduler *scheduler_get_by_id(unsigned int id);
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index a8e086521a..2b86b91c67 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -581,8 +581,8 @@ void cpu_init(void);
 
 struct scheduler;
 
-int schedule_init_global(char *name, struct scheduler *sched);
-void schedule_deinit_global(struct scheduler *sched);
+struct scheduler *scheduler_alloc(char *name);
+void scheduler_free(struct scheduler *sched);
 void schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
 void vcpu_force_reschedule(struct vcpu *v);
 int cpu_disable_scheduler(unsigned int cpu);
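Taken together, the schedule.c and header hunks replace schedule_init_global()/schedule_deinit_global() with scheduler_alloc()/scheduler_free(), and struct cpupool now carries a pointer to a per-pool struct scheduler: scheduler_alloc(NULL) hands back the static boot instance "ops" (which scheduler_free() refuses to free via BUG_ON), while other pools get a heap-allocated copy. The following is a small, self-contained model of that allocate-or-default lifecycle under simplified assumptions; it omits the scheduler table lookup and the init/deinit hooks of the real code, and the names are illustrative only.

    /* Simplified model of the scheduler_alloc()/scheduler_free() lifecycle:
     * a NULL name yields the statically allocated default, which must never
     * be freed; anything else is heap-allocated per pool. */
    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct scheduler {
        const char *name;
    };

    static struct scheduler ops = { .name = "credit" };  /* boot scheduler */

    static struct scheduler *scheduler_alloc(const char *name)
    {
        struct scheduler *sched;

        if (name == NULL)
            return &ops;               /* pool0 shares the boot scheduler */

        if ((sched = malloc(sizeof(*sched))) == NULL)
            return NULL;
        memcpy(sched, &ops, sizeof(*sched));
        sched->name = name;            /* stand-in for picking by opt_name */
        return sched;
    }

    static void scheduler_free(struct scheduler *sched)
    {
        assert(sched != &ops);         /* mirrors BUG_ON(sched == &ops) */
        free(sched);
    }

    int main(void)
    {
        struct scheduler *pool0_sched = scheduler_alloc(NULL);
        struct scheduler *pool1_sched = scheduler_alloc("credit2");

        if (pool1_sched == NULL)
            return 1;
        printf("pool0 uses %s, pool1 uses %s\n",
               pool0_sched->name, pool1_sched->name);
        scheduler_free(pool1_sched);   /* only dynamically created instances */
        return 0;
    }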