author     kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
           Fri, 13 Jan 2006 15:44:04 +0000 (16:44 +0100)
committer  kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
           Fri, 13 Jan 2006 15:44:04 +0000 (16:44 +0100)

Introduce a locking protocol for acquiring the 'scheduler
lock' on a particular VCPU. Since this requires acquiring
the appropriate per-CPU lock, we must re-check the VCPU's
current CPU binding after the lock is acquired.

Signed-off-by: Keir Fraser <keir@xensource.com>
xen/common/sched_bvt.c
xen/common/schedule.c
xen/include/xen/sched-if.h

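For reference, the heart of the protocol is the retry loop added to
xen/include/xen/sched-if.h in the final hunk below. A minimal annotated
sketch of that same helper (illustrative only, using the names introduced
by this patch) showing why the CPU binding must be re-read under the lock:

    /*
     * Sketch of the locking protocol (mirrors vcpu_schedule_lock() added
     * below). Between reading v->processor and acquiring that CPU's
     * schedule_lock, the VCPU may migrate to another CPU; if it has, the
     * lock we now hold no longer covers it, so drop it and retry.
     */
    static inline void vcpu_schedule_lock(struct vcpu *v)
    {
        unsigned int cpu;

        for ( ; ; )
        {
            cpu = v->processor;                             /* snapshot binding  */
            spin_lock(&schedule_data[cpu].schedule_lock);   /* lock that CPU     */
            if ( likely(v->processor == cpu) )              /* still bound here? */
                break;                                      /* lock is valid     */
            spin_unlock(&schedule_data[cpu].schedule_lock); /* migrated: retry   */
        }
    }

Callers such as vcpu_sleep_nosync() and vcpu_wake() in xen/common/schedule.c
switch to the _irq/_irqsave wrappers of this helper, as shown in the hunks below.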
diff --git a/xen/common/sched_bvt.c b/xen/common/sched_bvt.c
index df329dd982f89578be149dc45ac9d9f80434658f..9996e8bfac2c757c62af0962d04663c07d0ead68 100644
--- a/xen/common/sched_bvt.c
+++ b/xen/common/sched_bvt.c
@@ -98,9 +98,9 @@ static inline int __task_on_runqueue(struct vcpu *d)
 static void warp_timer_fn(void *data)
 {
     struct bvt_dom_info *inf = data;
-    unsigned int cpu = inf->domain->vcpu[0]->processor;
-    
-    spin_lock_irq(&schedule_data[cpu].schedule_lock);
+    struct vcpu *v = inf->domain->vcpu[0];
+
+    vcpu_schedule_lock_irq(v);
 
     inf->warp = 0;
 
@@ -108,28 +108,28 @@ static void warp_timer_fn(void *data)
     if ( inf->warpu == 0 )
     {
         inf->warpback = 0;
-        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);   
+        cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);   
     }
     
     set_timer(&inf->unwarp_timer, NOW() + inf->warpu);
 
-    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
+    vcpu_schedule_unlock_irq(v);
 }
 
 static void unwarp_timer_fn(void *data)
 {
     struct bvt_dom_info *inf = data;
-    unsigned int cpu = inf->domain->vcpu[0]->processor;
+    struct vcpu *v = inf->domain->vcpu[0];
 
-    spin_lock_irq(&schedule_data[cpu].schedule_lock);
+    vcpu_schedule_lock_irq(v);
 
     if ( inf->warpback )
     {
         inf->warp = 1;
-        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);   
+        cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);   
     }
      
-    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
+    vcpu_schedule_unlock_irq(v);
 }
 
 static inline u32 calc_avt(struct vcpu *d, s_time_t now)
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index a971d8fae1cc344af0c3a75b0af82cc23e478d94..f6668d359359578fab9b419d98bd4bc7016435bb 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -165,10 +165,10 @@ void vcpu_sleep_nosync(struct vcpu *v)
 {
     unsigned long flags;
 
-    spin_lock_irqsave(&schedule_data[v->processor].schedule_lock, flags);
+    vcpu_schedule_lock_irqsave(v, flags);
     if ( likely(!vcpu_runnable(v)) )
         SCHED_OP(sleep, v);
-    spin_unlock_irqrestore(&schedule_data[v->processor].schedule_lock, flags);
+    vcpu_schedule_unlock_irqrestore(v, flags);
 
     TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
 }
@@ -187,13 +187,13 @@ void vcpu_wake(struct vcpu *v)
 {
     unsigned long flags;
 
-    spin_lock_irqsave(&schedule_data[v->processor].schedule_lock, flags);
+    vcpu_schedule_lock_irqsave(v, flags);
     if ( likely(vcpu_runnable(v)) )
     {
         SCHED_OP(wake, v);
         v->wokenup = NOW();
     }
-    spin_unlock_irqrestore(&schedule_data[v->processor].schedule_lock, flags);
+    vcpu_schedule_unlock_irqrestore(v, flags);
 
     TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
 }
@@ -324,7 +324,7 @@ long sched_adjdom(struct sched_adjdom_cmd *cmd)
     for_each_vcpu ( d, v )
     {
         if ( v == current )
-            spin_lock_irq(&schedule_data[smp_processor_id()].schedule_lock);
+            vcpu_schedule_lock_irq(v);
         else
             vcpu_pause(v);
     }
@@ -336,7 +336,7 @@ long sched_adjdom(struct sched_adjdom_cmd *cmd)
     for_each_vcpu ( d, v )
     {
         if ( v == current )
-            spin_unlock_irq(&schedule_data[smp_processor_id()].schedule_lock);
+            vcpu_schedule_unlock_irq(v);
         else
             vcpu_unpause(v);
     }
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 7b94338391bce2d48940495eb8e6b3f44ec0a32e..d61d5c70d3960e01cc489547be2c757103b80081 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -16,16 +16,47 @@ struct schedule_data {
     struct vcpu        *curr;           /* current task                    */
     struct vcpu        *idle;           /* idle task for this cpu          */
     void               *sched_priv;
-    struct timer     s_timer;        /* scheduling timer                */
+    struct timer        s_timer;        /* scheduling timer                */
     unsigned long       tick;           /* current periodic 'tick'         */
 #ifdef BUCKETS
     u32                 hist[BUCKETS];  /* for scheduler latency histogram */
 #endif
 } __cacheline_aligned;
 
+extern struct schedule_data schedule_data[];
+
+static inline void vcpu_schedule_lock(struct vcpu *v)
+{
+    unsigned int cpu;
+
+    for ( ; ; )
+    {
+        cpu = v->processor;
+        spin_lock(&schedule_data[cpu].schedule_lock);
+        if ( likely(v->processor == cpu) )
+            break;
+        spin_unlock(&schedule_data[cpu].schedule_lock);
+    }
+}
+
+#define vcpu_schedule_lock_irq(v) \
+    do { local_irq_disable(); vcpu_schedule_lock(v); } while ( 0 )
+#define vcpu_schedule_lock_irqsave(v, flags) \
+    do { local_irq_save(flags); vcpu_schedule_lock(v); } while ( 0 )
+
+static inline void vcpu_schedule_unlock(struct vcpu *v)
+{
+    spin_unlock(&schedule_data[v->processor].schedule_lock);
+}
+
+#define vcpu_schedule_unlock_irq(v) \
+    do { vcpu_schedule_unlock(v); local_irq_enable(); } while ( 0 )
+#define vcpu_schedule_unlock_irqrestore(v, flags) \
+    do { vcpu_schedule_unlock(v); local_irq_restore(flags); } while ( 0 )
+
 struct task_slice {
     struct vcpu *task;
-    s_time_t            time;
+    s_time_t     time;
 };
 
 struct scheduler {
@@ -48,6 +79,4 @@ struct scheduler {
     void         (*dump_cpu_state) (int);
 };
 
-extern struct schedule_data schedule_data[];
-
 #endif /* __XEN_SCHED_IF_H__ */