x86: add irq count for IPIs
author: Kevin Tian <kevin.tian@intel.com>
Wed, 31 Aug 2011 14:18:23 +0000 (15:18 +0100)
committer: Kevin Tian <kevin.tian@intel.com>
Wed, 31 Aug 2011 14:18:23 +0000 (15:18 +0100)
Such a count is useful to assist decision making in the cpuidle
governor; without this patch, only device interrupts that go through
do_IRQ are currently counted.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
xen/arch/x86/apic.c
xen/arch/x86/cpu/mcheck/mce_intel.c
xen/arch/x86/io_apic.c
xen/arch/x86/smp.c

index 3d4d5ccc5fc5e239aa8ff93da4af7f3648832060..6a75036a09774898da8782f040d93d22753b5b52 100644 (file)
@@ -1332,6 +1332,7 @@ fastcall void smp_apic_timer_interrupt(struct cpu_user_regs * regs)
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
     ack_APIC_irq();
     perfc_incr(apic_timer);
+    this_cpu(irq_count)++;
     raise_softirq(TIMER_SOFTIRQ);
     set_irq_regs(old_regs);
 }
@@ -1353,6 +1354,7 @@ fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs)
     unsigned long v;
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
 
+    this_cpu(irq_count)++;
     irq_enter();
 
     /*
@@ -1388,6 +1390,7 @@ fastcall void smp_error_interrupt(struct cpu_user_regs *regs)
     unsigned long v, v1;
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
 
+    this_cpu(irq_count)++;
     irq_enter();
     /* First tickle the hardware, only then report what went on. -- REW */
     v = apic_read(APIC_ESR);
@@ -1419,6 +1422,7 @@ fastcall void smp_pmu_apic_interrupt(struct cpu_user_regs *regs)
 {
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
     ack_APIC_irq();
+    this_cpu(irq_count)++;
     hvm_do_pmu_interrupt(regs);
     set_irq_regs(old_regs);
 }
index d6d4bb99abda55952e4898c4e5d9adfc5e99da9f..bf75f6714c8f3a6a845432c98feb0dc4d670e437 100644 (file)
@@ -77,6 +77,7 @@ static void (*__read_mostly vendor_thermal_interrupt)(struct cpu_user_regs *regs
 fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs)
 {
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
+    this_cpu(irq_count)++;
     irq_enter();
     vendor_thermal_interrupt(regs);
     irq_exit();
@@ -1147,6 +1148,7 @@ fastcall void smp_cmci_interrupt(struct cpu_user_regs *regs)
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
 
     ack_APIC_irq();
+    this_cpu(irq_count)++;
     irq_enter();
 
     mctc = mcheck_mca_logout(
index dd45ff414e85e853590ad595ab9d80ee2838977e..471f7530e47edbb8ba538e94cfe658142172d049 100644 (file)
@@ -441,6 +441,7 @@ fastcall void smp_irq_move_cleanup_interrupt(struct cpu_user_regs *regs)
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
 
     ack_APIC_irq();
+    this_cpu(irq_count)++;
     irq_enter();
 
     me = smp_processor_id();
index 2f527079cdf1141f08d8a77ce9db4a994374b4c7..3edebf3df1a355eecde895085052cd1c2bd27377 100644 (file)
@@ -221,6 +221,7 @@ fastcall void smp_invalidate_interrupt(void)
 {
     ack_APIC_irq();
     perfc_incr(ipis);
+    this_cpu(irq_count)++;
     irq_enter();
     if ( !__sync_local_execstate() ||
          (flush_flags & (FLUSH_TLB_GLOBAL | FLUSH_CACHE)) )
@@ -385,6 +386,7 @@ fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs)
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
     ack_APIC_irq();
     perfc_incr(ipis);
+    this_cpu(irq_count)++;
     set_irq_regs(old_regs);
 }
 
@@ -421,6 +423,7 @@ fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs)
 
     ack_APIC_irq();
     perfc_incr(ipis);
+    this_cpu(irq_count)++;
     __smp_call_function_interrupt();
     set_irq_regs(old_regs);
 }