x86: Allow HPET to set timers more sloppily by seeing each CPU's
acceptable deadline range, rather than just deadline start.

author    Keir Fraser <keir.fraser@citrix.com>
          Fri, 11 Dec 2009 08:50:13 +0000 (08:50 +0000)
committer Keir Fraser <keir.fraser@citrix.com>
          Fri, 11 Dec 2009 08:50:13 +0000 (08:50 +0000)

Signed-off-by: Wei Gang <gang.wei@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen/arch/x86/acpi/cpuidle_menu.c
xen/arch/x86/hpet.c
xen/arch/x86/time.c
xen/common/timer.c
xen/include/xen/timer.h
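
The core idea, before the diffs: each CPU now publishes an acceptable
wakeup window [timer_deadline_start, timer_deadline_end] instead of a
single instant. The shared HPET broadcast channel may fire anywhere
inside every sleeping CPU's window, so it can defer its next event to
the earliest window *end*, by which time several windows may have
opened and one interrupt can serve them all. A minimal standalone
sketch of that rule, assuming a hypothetical struct window and
next_broadcast() helper (not the Xen code below):

    #include <stdint.h>

    typedef int64_t s_time_t;
    #define STIME_MAX INT64_MAX          /* stands in for Xen's STIME_MAX */

    struct window { s_time_t start, end; };  /* acceptable firing range */

    /*
     * Model of the broadcast decision: wake every CPU whose window has
     * opened (start <= now); among the rest, schedule the next event at
     * the earliest *end*, the latest instant that still satisfies all.
     * Assumes ncpus <= 64 so a u64 can serve as the wakeup mask.
     */
    static s_time_t next_broadcast(const struct window *w, int ncpus,
                                   s_time_t now, uint64_t *wake_mask)
    {
        s_time_t next_event = STIME_MAX;

        *wake_mask = 0;
        for ( int cpu = 0; cpu < ncpus; cpu++ )
        {
            if ( w[cpu].start <= now )
                *wake_mask |= 1ull << cpu;   /* window open: wake now */
            else if ( w[cpu].end < next_event )
                next_event = w[cpu].end;     /* defer as long as allowed */
        }
        return next_event;
    }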

diff --git a/xen/arch/x86/acpi/cpuidle_menu.c b/xen/arch/x86/acpi/cpuidle_menu.c
index e2c350962c563255a3adbdab99065350f56d0de7..e008918f0cc1298709a21b5dfe41061d208cf4ad 100644
--- a/xen/arch/x86/acpi/cpuidle_menu.c
+++ b/xen/arch/x86/acpi/cpuidle_menu.c
@@ -49,7 +49,7 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
 
 static unsigned int get_sleep_length_us(void)
 {
-    s_time_t us = (per_cpu(timer_deadline, smp_processor_id()) - NOW()) / 1000;
+    s_time_t us = (this_cpu(timer_deadline_start) - NOW()) / 1000;
     /*
      * while us < 0 or us > (u32)-1, return a large u32,
      * choose (unsigned int)-2000 to avoid wrapping while added with exit
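
(The menu governor deliberately keeps using the early edge: its sleep
length prediction must be pessimistic, because the CPU may legitimately
be woken as early as timer_deadline_start.)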
diff --git a/xen/arch/x86/hpet.c b/xen/arch/x86/hpet.c
index ec2a2b6afdb76c9c215f6a979f3b6ee3321a228f..767f563f840a9c723270343e3fdcaef4de721f4f 100644
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -190,10 +190,10 @@ again:
     /* find all expired events */
     for_each_cpu_mask(cpu, ch->cpumask)
     {
-        if ( per_cpu(timer_deadline, cpu) <= now )
+        if ( per_cpu(timer_deadline_start, cpu) <= now )
             cpu_set(cpu, mask);
-        else if ( per_cpu(timer_deadline, cpu) < next_event )
-            next_event = per_cpu(timer_deadline, cpu);
+        else if ( per_cpu(timer_deadline_end, cpu) < next_event )
+            next_event = per_cpu(timer_deadline_end, cpu);
     }
 
     /* wakeup the cpus which have an expired event. */
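
A worked example of why keying next_event to the window *end* coalesces
interrupts (hypothetical numbers): suppose CPU0 sleeps with window
[100us, 130us] and CPU1 with [110us, 120us]. Keyed to the starts, the
channel fires at 100 (waking only CPU0) and again at 110 (waking CPU1):
two interrupts. Keyed to the ends, it fires once at 120 = min(130, 120);
by then both windows have opened, so a single interrupt wakes both CPUs
without making either of them late.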
@@ -629,7 +629,7 @@ void hpet_broadcast_enter(void)
     int cpu = smp_processor_id();
     struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
 
-    if ( this_cpu(timer_deadline) == 0 )
+    if ( this_cpu(timer_deadline_start) == 0 )
         return;
 
     if ( !ch )
@@ -649,8 +649,8 @@ void hpet_broadcast_enter(void)
     cpu_set(cpu, ch->cpumask);
 
     /* reprogram if current cpu expire time is nearer */
-    if ( this_cpu(timer_deadline) < ch->next_event )
-        reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline), NOW(), 1);
+    if ( this_cpu(timer_deadline_end) < ch->next_event )
+        reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline_end), NOW(), 1);
 
     spin_unlock(&ch->lock);
 }
@@ -660,7 +660,7 @@ void hpet_broadcast_exit(void)
     int cpu = smp_processor_id();
     struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
 
-    if ( this_cpu(timer_deadline) == 0 )
+    if ( this_cpu(timer_deadline_start) == 0 )
         return;
 
     BUG_ON( !ch );
@@ -671,7 +671,7 @@ void hpet_broadcast_exit(void)
     {
         /* Reprogram the deadline; trigger timer work now if it has passed. */
         enable_APIC_timer();
-        if ( !reprogram_timer(per_cpu(timer_deadline, cpu)) )
+        if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
             raise_softirq(TIMER_SOFTIRQ);
 
         if ( cpus_empty(ch->cpumask) && ch->next_event != STIME_MAX )
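
Taken together, enter and exit split the two uses of the deadline: the
shared HPET channel is programmed against timer_deadline_end (firing
late, within the window, is fine and aids coalescing), while the local
APIC timer restored on exit is re-armed at timer_deadline_start (the
local timer is private, so there is nothing to coalesce and no reason
to give up the early edge). A hedged sketch of that pairing, with
hypothetical helper names standing in for the Xen primitives:

    #include <stdint.h>

    typedef int64_t s_time_t;
    struct channel { s_time_t next_event; };

    /* Hypothetical placeholders for the real Xen primitives. */
    extern void disable_local_apic_timer(void);
    extern void enable_local_apic_timer(void);
    extern void join_channel(struct channel *ch, int cpu);
    extern void leave_channel(struct channel *ch, int cpu);
    extern void program_channel(struct channel *ch, s_time_t deadline);
    extern s_time_t deadline_start(int cpu);
    extern s_time_t deadline_end(int cpu);
    extern int  reprogram_local_timer(s_time_t deadline);
    extern void raise_timer_softirq(void);

    void broadcast_enter_sketch(struct channel *ch, int cpu)
    {
        disable_local_apic_timer();            /* stops in deep C-states */
        join_channel(ch, cpu);
        if ( deadline_end(cpu) < ch->next_event )    /* needed sooner */
            program_channel(ch, deadline_end(cpu));  /* late edge is fine */
    }

    void broadcast_exit_sketch(struct channel *ch, int cpu)
    {
        leave_channel(ch, cpu);
        enable_local_apic_timer();
        if ( !reprogram_local_timer(deadline_start(cpu)) ) /* early edge */
            raise_timer_softirq();        /* deadline already passed */
    }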
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index ced77e26a63489da3e7ce4f978afa562d2284531..cb8f1ff4ecf634c073d5a216e2029b670d155f62 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1367,7 +1367,7 @@ void pit_broadcast_exit(void)
     int cpu = smp_processor_id();
 
     if ( cpu_test_and_clear(cpu, pit_broadcast_mask) )
-        reprogram_timer(per_cpu(timer_deadline, cpu));
+        reprogram_timer(per_cpu(timer_deadline_start, cpu));
 }
 
 int pit_broadcast_is_available(void)
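
The legacy PIT broadcast path, used when HPET broadcast is not
available, makes the matching choice: on exit the local timer is
re-armed at the window's start, never its end.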
diff --git a/xen/common/timer.c b/xen/common/timer.c
index aaa4f4e18c27d4f4e8c28e29ec9cc5648a7c3007..5cf61118d54c17bacf57d37b234300586683074b 100644
--- a/xen/common/timer.c
+++ b/xen/common/timer.c
@@ -38,7 +38,8 @@ struct timers {
 
 static DEFINE_PER_CPU(struct timers, timers);
 
-DEFINE_PER_CPU(s_time_t, timer_deadline);
+DEFINE_PER_CPU(s_time_t, timer_deadline_start);
+DEFINE_PER_CPU(s_time_t, timer_deadline_end);
 
 /****************************************************************************
  * HEAP OPERATIONS.
@@ -425,10 +426,11 @@ static void timer_softirq_action(void)
     if ( unlikely(ts->overflow) )
     {
         /* Find earliest deadline at head of list or top of heap. */
-        this_cpu(timer_deadline) = ts->list->expires;
+        this_cpu(timer_deadline_start) = ts->list->expires;
         if ( (GET_HEAP_SIZE(heap) != 0) &&
-             ((t = heap[1])->expires < this_cpu(timer_deadline)) )
-            this_cpu(timer_deadline) = t->expires;
+             ((t = heap[1])->expires < this_cpu(timer_deadline_start)) )
+            this_cpu(timer_deadline_start) = t->expires;
+        this_cpu(timer_deadline_end) = this_cpu(timer_deadline_start);
     }
     else
     {
@@ -455,10 +457,11 @@ static void timer_softirq_action(void)
                 end = t->expires_end;
         }
 
-        this_cpu(timer_deadline) = start;
+        this_cpu(timer_deadline_start) = start;
+        this_cpu(timer_deadline_end) = end;
     }
 
-    if ( !reprogram_timer(this_cpu(timer_deadline)) )
+    if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
         raise_softirq(TIMER_SOFTIRQ);
 
     spin_unlock_irq(&ts->lock);
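
timer_softirq_action() is where the window is produced. In the rare
overflow path the code falls back to a degenerate window with end ==
start, conceding no slack; in the common path it merges the per-timer
[expires, expires_end] ranges into one firing window for the CPU. A
hedged sketch of the assumed shape of that merge (the hunk above shows
only its tail), using the timer fields visible in the diff:

    #include <stdint.h>
    #include <stddef.h>

    typedef int64_t s_time_t;

    struct timer {
        s_time_t expires, expires_end;   /* this timer's window */
        struct timer *list_next;         /* list sorted by expires */
    };

    /*
     * Walk pending timers in expires order and intersect their windows:
     * the result [start, end] is a single range in which one wakeup can
     * service every overlapping timer without making any of them late.
     */
    static void find_window(struct timer *list, s_time_t *start,
                            s_time_t *end)
    {
        *start = list->expires;
        *end   = list->expires_end;
        for ( struct timer *t = list->list_next; t != NULL;
              t = t->list_next )
        {
            if ( t->expires > *end )
                break;                  /* disjoint: leave for a later pass */
            if ( t->expires_end < *end )
                *end = t->expires_end;  /* tighten the shared window */
        }
    }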
diff --git a/xen/include/xen/timer.h b/xen/include/xen/timer.h
index 0379d950a3f312f2039bf72f493753f7cf396598..1994eff00fee7160a961729cf4f49b3d518d62c1 100644
--- a/xen/include/xen/timer.h
+++ b/xen/include/xen/timer.h
@@ -117,7 +117,8 @@ extern void timer_init(void);
  * Next timer deadline for each CPU.
  * Modified only by the local CPU and never in interrupt context.
  */
-DECLARE_PER_CPU(s_time_t, timer_deadline);
+DECLARE_PER_CPU(s_time_t, timer_deadline_start);
+DECLARE_PER_CPU(s_time_t, timer_deadline_end);
 
 /* Arch-defined function to reprogram timer hardware for new deadline. */
 extern int reprogram_timer(s_time_t timeout);
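
Note the interface split this leaves intact: reprogram_timer() still
takes a single instant, and each caller above chooses which edge of the
window to pass: start for the local APIC timer and PIT paths, end for
the shared HPET channel. Its return convention, visible in
hpet_broadcast_exit() above, is that a zero result means the deadline
has already passed and the caller must raise TIMER_SOFTIRQ itself.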