CPUIDLE: shorten hpet spin_lock holding time
author    Keir Fraser <keir.fraser@citrix.com>
          Tue, 4 May 2010 11:52:48 +0000 (12:52 +0100)
committer Keir Fraser <keir.fraser@citrix.com>
          Tue, 4 May 2010 11:52:48 +0000 (12:52 +0100)
Try to reduce spin_lock overhead for deep C-state entry/exit. This
will benefit systems with many CPUs that need the HPET broadcast to
wake up from a deep C state.

Signed-off-by: Wei Gang <gang.wei@intel.com>
xen/arch/x86/hpet.c

index 1cedb1cb3cc2d434b9caf1bee27aa86633503922..086a8716179b40dc2c162413514c4b16f832f779 100644 (file)
@@ -186,6 +186,9 @@ static void handle_hpet_broadcast(struct hpet_event_channel *ch)
 
 again:
     ch->next_event = STIME_MAX;
+
+    spin_unlock_irq(&ch->lock);
+
     next_event = STIME_MAX;
     mask = (cpumask_t)CPU_MASK_NONE;
     now = NOW();
@@ -193,10 +196,17 @@ again:
     /* find all expired events */
     for_each_cpu_mask(cpu, ch->cpumask)
     {
-        if ( per_cpu(timer_deadline_start, cpu) <= now )
-            cpu_set(cpu, mask);
-        else if ( per_cpu(timer_deadline_end, cpu) < next_event )
-            next_event = per_cpu(timer_deadline_end, cpu);
+        spin_lock_irq(&ch->lock);
+
+        if ( cpumask_test_cpu(cpu, ch->cpumask) )
+        {
+            if ( per_cpu(timer_deadline_start, cpu) <= now )
+                cpu_set(cpu, mask);
+            else if ( per_cpu(timer_deadline_end, cpu) < next_event )
+                next_event = per_cpu(timer_deadline_end, cpu);
+        }
+
+        spin_unlock_irq(&ch->lock);
     }
 
     /* wakeup the cpus which have an expired event. */
@@ -204,10 +214,14 @@ again:
 
     if ( next_event != STIME_MAX )
     {
-        if ( reprogram_hpet_evt_channel(ch, next_event, now, 0) )
+        spin_lock_irq(&ch->lock);
+
+        if ( next_event < ch->next_event &&
+             reprogram_hpet_evt_channel(ch, next_event, now, 0) )
             goto again;
+
+        spin_unlock_irq(&ch->lock);
     }
-    spin_unlock_irq(&ch->lock);
 }
 
 static void hpet_interrupt_handler(int irq, void *data,
@@ -656,17 +670,23 @@ void hpet_broadcast_enter(void)
     BUG_ON( !ch );
 
     ASSERT(!local_irq_is_enabled());
-    spin_lock(&ch->lock);
 
     if ( hpet_attach_channel )
+    {
+        spin_lock(&ch->lock);
+
         hpet_attach_channel(cpu, ch);
 
+        spin_unlock(&ch->lock);
+    }
+
     /* Cancel any outstanding LAPIC timer event and disable interrupts. */
     reprogram_timer(0);
     disable_APIC_timer();
 
-    cpu_set(cpu, ch->cpumask);
+    spin_lock(&ch->lock);
 
+    cpu_set(cpu, ch->cpumask);
     /* reprogram if current cpu expire time is nearer */
     if ( this_cpu(timer_deadline_end) < ch->next_event )
         reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline_end), NOW(), 1);
@@ -684,23 +704,28 @@ void hpet_broadcast_exit(void)
 
     BUG_ON( !ch );
 
+    /* Reprogram the deadline; trigger timer work now if it has passed. */
+    enable_APIC_timer();
+    if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
+        raise_softirq(TIMER_SOFTIRQ);
+
     spin_lock_irq(&ch->lock);
 
-    if ( cpu_test_and_clear(cpu, ch->cpumask) )
-    {
-        /* Reprogram the deadline; trigger timer work now if it has passed. */
-        enable_APIC_timer();
-        if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
-            raise_softirq(TIMER_SOFTIRQ);
+    cpu_clear(cpu, ch->cpumask);
+    if ( cpus_empty(ch->cpumask) && ch->next_event != STIME_MAX )
+        reprogram_hpet_evt_channel(ch, STIME_MAX, 0, 0);
+
+    spin_unlock_irq(&ch->lock);
 
-        if ( cpus_empty(ch->cpumask) && ch->next_event != STIME_MAX )
-            reprogram_hpet_evt_channel(ch, STIME_MAX, 0, 0);
-    }
 
     if ( hpet_detach_channel )
+    {
+        spin_lock_irq(&ch->lock);
+
         hpet_detach_channel(cpu);
 
-    spin_unlock_irq(&ch->lock);
+        spin_unlock_irq(&ch->lock);
+    }
 }
 
 int hpet_broadcast_is_available(void)