From: kaf24@firebug.cl.cam.ac.uk
Date: Fri, 13 Jan 2006 15:44:04 +0000 (+0100)
Subject: Introduce a locking protocol for acquiring the 'scheduler lock' on a particular VCPU
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~16541^2~42
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=fc5423d8cdf92d98f5eda9edc771cb546184ea93;p=xen.git

Introduce a locking protocol for acquiring the 'scheduler
lock' on a particular VCPU. Since this requires acquiring the
appropriate per-CPU lock, and the VCPU may migrate to another
CPU before the lock is taken, we must re-check the VCPU's
current CPU binding after the lock is acquired.

Signed-off-by: Keir Fraser
---

diff --git a/xen/common/sched_bvt.c b/xen/common/sched_bvt.c
index df329dd982..9996e8bfac 100644
--- a/xen/common/sched_bvt.c
+++ b/xen/common/sched_bvt.c
@@ -98,9 +98,9 @@ static inline int __task_on_runqueue(struct vcpu *d)
 static void warp_timer_fn(void *data)
 {
     struct bvt_dom_info *inf = data;
-    unsigned int cpu = inf->domain->vcpu[0]->processor;
-
-    spin_lock_irq(&schedule_data[cpu].schedule_lock);
+    struct vcpu *v = inf->domain->vcpu[0];
+
+    vcpu_schedule_lock_irq(v);
 
     inf->warp = 0;
 
@@ -108,28 +108,28 @@ static void warp_timer_fn(void *data)
     if ( inf->warpu == 0 )
     {
         inf->warpback = 0;
-        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
+        cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
     }
 
     set_timer(&inf->unwarp_timer, NOW() + inf->warpu);
 
-    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
+    vcpu_schedule_unlock_irq(v);
 }
 
 static void unwarp_timer_fn(void *data)
 {
     struct bvt_dom_info *inf = data;
-    unsigned int cpu = inf->domain->vcpu[0]->processor;
+    struct vcpu *v = inf->domain->vcpu[0];
 
-    spin_lock_irq(&schedule_data[cpu].schedule_lock);
+    vcpu_schedule_lock_irq(v);
 
     if ( inf->warpback )
     {
         inf->warp = 1;
-        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
+        cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
     }
 
-    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
+    vcpu_schedule_unlock_irq(v);
 }
 
 static inline u32 calc_avt(struct vcpu *d, s_time_t now)
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index a971d8fae1..f6668d3593 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -165,10 +165,10 @@ void vcpu_sleep_nosync(struct vcpu *v)
 {
     unsigned long flags;
 
-    spin_lock_irqsave(&schedule_data[v->processor].schedule_lock, flags);
+    vcpu_schedule_lock_irqsave(v, flags);
     if ( likely(!vcpu_runnable(v)) )
         SCHED_OP(sleep, v);
-    spin_unlock_irqrestore(&schedule_data[v->processor].schedule_lock, flags);
+    vcpu_schedule_unlock_irqrestore(v, flags);
 
     TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
 }
@@ -187,13 +187,13 @@ void vcpu_wake(struct vcpu *v)
 {
     unsigned long flags;
 
-    spin_lock_irqsave(&schedule_data[v->processor].schedule_lock, flags);
+    vcpu_schedule_lock_irqsave(v, flags);
     if ( likely(vcpu_runnable(v)) )
     {
         SCHED_OP(wake, v);
         v->wokenup = NOW();
     }
-    spin_unlock_irqrestore(&schedule_data[v->processor].schedule_lock, flags);
+    vcpu_schedule_unlock_irqrestore(v, flags);
 
     TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
 }
@@ -324,7 +324,7 @@ long sched_adjdom(struct sched_adjdom_cmd *cmd)
     for_each_vcpu ( d, v )
     {
         if ( v == current )
-            spin_lock_irq(&schedule_data[smp_processor_id()].schedule_lock);
+            vcpu_schedule_lock_irq(v);
         else
             vcpu_pause(v);
     }
@@ -336,7 +336,7 @@ long sched_adjdom(struct sched_adjdom_cmd *cmd)
     for_each_vcpu ( d, v )
     {
         if ( v == current )
-            spin_unlock_irq(&schedule_data[smp_processor_id()].schedule_lock);
+            vcpu_schedule_unlock_irq(v);
         else
             vcpu_unpause(v);
     }
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 7b94338391..d61d5c70d3 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -16,16 +16,47 @@ struct schedule_data {
     struct vcpu        *curr;           /* current task */
     struct vcpu        *idle;           /* idle task for this cpu */
     void               *sched_priv;
-    struct timer      s_timer;         /* scheduling timer */
+    struct timer        s_timer;        /* scheduling timer */
     unsigned long       tick;           /* current periodic 'tick' */
 #ifdef BUCKETS
     u32                 hist[BUCKETS];  /* for scheduler latency histogram */
 #endif
 } __cacheline_aligned;
 
+extern struct schedule_data schedule_data[];
+
+static inline void vcpu_schedule_lock(struct vcpu *v)
+{
+    unsigned int cpu;
+
+    for ( ; ; )
+    {
+        cpu = v->processor;
+        spin_lock(&schedule_data[cpu].schedule_lock);
+        if ( likely(v->processor == cpu) )
+            break;
+        spin_unlock(&schedule_data[cpu].schedule_lock);
+    }
+}
+
+#define vcpu_schedule_lock_irq(v) \
+    do { local_irq_disable(); vcpu_schedule_lock(v); } while ( 0 )
+#define vcpu_schedule_lock_irqsave(v, flags) \
+    do { local_irq_save(flags); vcpu_schedule_lock(v); } while ( 0 )
+
+static inline void vcpu_schedule_unlock(struct vcpu *v)
+{
+    spin_unlock(&schedule_data[v->processor].schedule_lock);
+}
+
+#define vcpu_schedule_unlock_irq(v) \
+    do { vcpu_schedule_unlock(v); local_irq_enable(); } while ( 0 )
+#define vcpu_schedule_unlock_irqrestore(v, flags) \
+    do { vcpu_schedule_unlock(v); local_irq_restore(flags); } while ( 0 )
+
 struct task_slice {
     struct vcpu *task;
-    s_time_t time;
+    s_time_t     time;
 };
 
 struct scheduler {
@@ -48,6 +79,4 @@ struct scheduler {
     void         (*dump_cpu_state) (int);
 };
 
-extern struct schedule_data schedule_data[];
-
 #endif /* __XEN_SCHED_IF_H__ */
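
For readers without the surrounding tree, the protocol can be sketched in
user-space C. This is a minimal illustration, not Xen code: pthread mutexes
stand in for the per-CPU schedule_lock, a 'migrator' thread plays the
migration path, and all names are invented for the sketch. The invariant
mirrored from the patch is that v->processor may only change while the
scheduler lock of its current CPU is held; that is what makes the re-check
in vcpu_schedule_lock() sufficient.

    /* sketch.c -- illustrative only; build with e.g. cc -pthread sketch.c */
    #include <pthread.h>
    #include <stdio.h>

    #define NR_CPUS 4

    static pthread_mutex_t schedule_lock[NR_CPUS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
    };

    struct vcpu {
        volatile unsigned int processor;   /* current CPU binding */
    };

    /* Lock the scheduler state of v's current CPU.  The binding may
     * change between reading v->processor and taking the lock (the
     * initial read is deliberately unlocked), so re-check it under
     * the lock and retry on failure. */
    static void vcpu_schedule_lock(struct vcpu *v)
    {
        unsigned int cpu;

        for ( ; ; )
        {
            cpu = v->processor;
            pthread_mutex_lock(&schedule_lock[cpu]);
            if ( v->processor == cpu )
                break;                                  /* binding held  */
            pthread_mutex_unlock(&schedule_lock[cpu]);  /* raced: retry  */
        }
    }

    static void vcpu_schedule_unlock(struct vcpu *v)
    {
        pthread_mutex_unlock(&schedule_lock[v->processor]);
    }

    /* Plays the migration path: changes v->processor, but only while
     * holding the lock of the VCPU's *current* CPU -- the invariant the
     * re-check relies on.  The unlock must name the old CPU, since
     * v->processor has already moved on. */
    static void *migrator(void *arg)
    {
        struct vcpu *v = arg;
        unsigned int old;
        int i;

        for ( i = 0; i < 100000; i++ )
        {
            vcpu_schedule_lock(v);
            old = v->processor;
            v->processor = (old + 1) % NR_CPUS;
            pthread_mutex_unlock(&schedule_lock[old]);
        }
        return NULL;
    }

    int main(void)
    {
        struct vcpu v = { .processor = 0 };
        pthread_t t;
        int i;

        pthread_create(&t, NULL, migrator, &v);

        for ( i = 0; i < 100000; i++ )
        {
            vcpu_schedule_lock(&v);
            /* v->processor cannot change here: we hold its CPU's lock. */
            vcpu_schedule_unlock(&v);
        }

        pthread_join(t, NULL);
        printf("done: vcpu finished on CPU %u\n", v.processor);
        return 0;
    }

The design point the sketch makes concrete: compared with one global
scheduler lock, the per-CPU lock plus re-check loop keeps lock traffic
local to each CPU, and the occasional retry is only paid when a migration
actually races with the lock acquisition.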