From: Keir Fraser
Date: Fri, 24 Dec 2010 08:26:59 +0000 (+0000)
Subject: scheduler: Introduce pcpu_schedule_lock
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~11036
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=d2f6b6016990b570c6782e1639ca1c0b07013b59;p=xen.git

scheduler: Introduce pcpu_schedule_lock

Many places in Xen, particularly schedule.c, grab the per-cpu spinlock
directly, rather than through vcpu_schedule_lock().  Since the lock
pointer may change between the time it's read and the time the lock is
successfully acquired, we need to check after acquiring the lock to make
sure that the pcpu's lock hasn't changed due to cpu initialization or
cpupool activity.

Signed-off-by: George Dunlap
---

diff --git a/xen/arch/ia64/vmx/vmmu.c b/xen/arch/ia64/vmx/vmmu.c
index 9751a12d1f..d92c7c73ff 100644
--- a/xen/arch/ia64/vmx/vmmu.c
+++ b/xen/arch/ia64/vmx/vmmu.c
@@ -394,7 +394,7 @@ static void ptc_ga_remote_func (void *varg)
     if (cpu != current->processor)
         return;
     local_irq_save(flags);
-    if (!spin_trylock(per_cpu(schedule_data, cpu).schedule_lock))
+    if (!pcpu_schedule_trylock(cpu))
         goto bail2;
     if (v->processor != cpu)
         goto bail1;
@@ -416,7 +416,7 @@ static void ptc_ga_remote_func (void *varg)
     ia64_dv_serialize_data();
     args->vcpu = NULL;
  bail1:
-    spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
+    pcpu_schedule_unlock(cpu);
  bail2:
     local_irq_restore(flags);
 }
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index aad47706a9..bfe20d30d0 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -905,7 +905,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
 
     spc->runq_sort_last = sort_epoch;
 
-    spin_lock_irqsave(per_cpu(schedule_data, cpu).schedule_lock, flags);
+    pcpu_schedule_lock_irqsave(cpu, flags);
 
     runq = &spc->runq;
     elem = runq->next;
@@ -930,7 +930,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
         elem = next;
     }
 
-    spin_unlock_irqrestore(per_cpu(schedule_data, cpu).schedule_lock, flags);
+    pcpu_schedule_unlock_irqrestore(cpu, flags);
 }
 
 static void
@@ -1259,7 +1259,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
          * cause a deadlock if the peer CPU is also load balancing and trying
          * to lock this CPU.
          */
-        if ( !spin_trylock(per_cpu(schedule_data, peer_cpu).schedule_lock) )
+        if ( !pcpu_schedule_trylock(peer_cpu) )
         {
             CSCHED_STAT_CRANK(steal_trylock_failed);
             continue;
@@ -1269,7 +1269,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
          * Any work over there to steal?
          */
         speer = csched_runq_steal(peer_cpu, cpu, snext->pri);
-        spin_unlock(per_cpu(schedule_data, peer_cpu).schedule_lock);
+        pcpu_schedule_unlock(peer_cpu);
 
         if ( speer != NULL )
         {
             *stolen = 1;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 812b0d1a4f..69996b2f60 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -424,7 +424,8 @@ static void vcpu_migrate(struct vcpu *v)
             atomic_dec(&per_cpu(schedule_data, old_cpu).urgent_count);
     }
 
-    /* Switch to new CPU, then unlock old CPU. */
+    /* Switch to new CPU, then unlock old CPU.  This is safe because
+     * the lock pointer can't change while the current lock is held. */
     v->processor = new_cpu;
     spin_unlock_irqrestore(
         per_cpu(schedule_data, old_cpu).schedule_lock, flags);
@@ -1302,7 +1303,7 @@ void schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
     vpriv = SCHED_OP(new_ops, alloc_vdata, idle, idle->domain->sched_priv);
 
-    spin_lock_irqsave(per_cpu(schedule_data, cpu).schedule_lock, flags);
+    pcpu_schedule_lock_irqsave(cpu, flags);
 
     SCHED_OP(old_ops, tick_suspend, cpu);
     vpriv_old = idle->sched_priv;
@@ -1313,7 +1314,7 @@ void schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     SCHED_OP(new_ops, tick_resume, cpu);
     SCHED_OP(new_ops, insert_vcpu, idle);
 
-    spin_unlock_irqrestore(per_cpu(schedule_data, cpu).schedule_lock, flags);
+    pcpu_schedule_unlock_irqrestore(cpu, flags);
 
     SCHED_OP(old_ops, free_vdata, vpriv_old);
     SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
@@ -1369,10 +1370,10 @@ void schedule_dump(struct cpupool *c)
 
     for_each_cpu_mask (i, *cpus)
     {
-        spin_lock(per_cpu(schedule_data, i).schedule_lock);
+        pcpu_schedule_lock(i);
         printk("CPU[%02d] ", i);
         SCHED_OP(sched, dump_cpu_state, i);
-        spin_unlock(per_cpu(schedule_data, i).schedule_lock);
+        pcpu_schedule_unlock(i);
     }
 }
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 84f7f5a1c8..e8f0262a6b 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -39,6 +39,57 @@ DECLARE_PER_CPU(struct schedule_data, schedule_data);
 DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);
 
+static inline spinlock_t * pcpu_schedule_lock(int cpu)
+{
+    spinlock_t * lock=NULL;
+
+    for ( ; ; )
+    {
+        /* The per_cpu(v->processor) may also change, if changing
+         * cpu pool also changes the scheduler lock.  Retry
+         * until they match.
+         */
+        lock=per_cpu(schedule_data, cpu).schedule_lock;
+
+        spin_lock(lock);
+        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) )
+            break;
+        spin_unlock(lock);
+    }
+    return lock;
+}
+
+static inline int pcpu_schedule_trylock(int cpu)
+{
+    spinlock_t * lock=NULL;
+
+    lock=per_cpu(schedule_data, cpu).schedule_lock;
+    if ( ! spin_trylock(lock) )
+        return 0;
+    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
+        return 1;
+    else
+    {
+        spin_unlock(lock);
+        return 0;
+    }
+}
+
+#define pcpu_schedule_lock_irq(p) \
+    do { local_irq_disable(); pcpu_schedule_lock(p); } while ( 0 )
+#define pcpu_schedule_lock_irqsave(p, flags) \
+    do { local_irq_save(flags); pcpu_schedule_lock(p); } while ( 0 )
+
+static inline void pcpu_schedule_unlock(int cpu)
+{
+    spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
+}
+
+#define pcpu_schedule_unlock_irq(p) \
+    do { pcpu_schedule_unlock(p); local_irq_enable(); } while ( 0 )
+#define pcpu_schedule_unlock_irqrestore(p, flags) \
+    do { pcpu_schedule_unlock(p); local_irq_restore(flags); } while ( 0 )
+
 static inline void vcpu_schedule_lock(struct vcpu *v)
 {
     spinlock_t * lock;
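
The check-after-acquire idea introduced by this patch can also be read outside
the hypervisor.  Below is a minimal standalone sketch of the same retry loop,
written against pthread mutexes so it compiles on its own; the names
sketch_pcpu, sketch_data, sketch_pcpu_lock and sketch_pcpu_unlock, and the
array size, are illustrative assumptions and are not part of this patch.

/* Sketch of pcpu_schedule_lock()'s retry loop: the published lock pointer
 * may be swapped while we are waiting for it, so re-check it after the
 * acquisition succeeds and retry with the new lock if it changed.  The
 * real Xen code additionally relies on the pointer only being repointed
 * by a holder of the current lock, and on the appropriate memory
 * barriers; both are omitted from this sketch. */
#include <pthread.h>

struct sketch_pcpu {
    pthread_mutex_t *schedule_lock;   /* may be repointed at runtime */
};

static struct sketch_pcpu sketch_data[64];

/* Acquire whatever lock is currently published for 'cpu'. */
static pthread_mutex_t *sketch_pcpu_lock(int cpu)
{
    for ( ; ; )
    {
        pthread_mutex_t *lock = sketch_data[cpu].schedule_lock;

        pthread_mutex_lock(lock);
        /* Pointer unchanged: this lock still guards 'cpu'. */
        if ( lock == sketch_data[cpu].schedule_lock )
            return lock;
        /* Repointed while we waited: drop the stale lock and retry. */
        pthread_mutex_unlock(lock);
    }
}

static void sketch_pcpu_unlock(pthread_mutex_t *lock)
{
    pthread_mutex_unlock(lock);
}

The unlock side can simply release the lock it was handed, or re-read the
published pointer as pcpu_schedule_unlock() does, because the pointer cannot
change while the current lock is held.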