x86: remove redundancy of MSR_P6_{PERFCTR,EVNTSEL} definitions
author Jan Beulich <jbeulich@suse.com>
Fri, 5 Sep 2014 08:58:00 +0000 (10:58 +0200)
committer Jan Beulich <jbeulich@suse.com>
Fri, 5 Sep 2014 08:58:00 +0000 (10:58 +0200)
Not only did the EVNTSEL ones get defined twice, we can also easily
abstract out the numbers previously attached to them.

While at it also remove an unused Geode-related define.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
xen/arch/x86/hvm/vmx/vpmu_core2.c
xen/arch/x86/nmi.c
xen/arch/x86/oprofile/op_model_ppro.c
xen/include/asm-x86/msr-index.h

index 56f5059f4665ed4807764748baeee3493273743b..68b6272be9d35303f1b1d17e9ea46f71a7d9b36a 100644 (file)
@@ -105,9 +105,9 @@ static void handle_pmc_quirk(u64 msr_content)
         if ( val & 0x1 )
         {
             u64 cnt;
-            rdmsrl(MSR_P6_PERFCTR0 + i, cnt);
+            rdmsrl(MSR_P6_PERFCTR(i), cnt);
             if ( cnt == 0 )
-                wrmsrl(MSR_P6_PERFCTR0 + i, 1);
+                wrmsrl(MSR_P6_PERFCTR(i), 1);
         }
         val >>= 1;
     }
@@ -238,11 +238,11 @@ static int is_core2_vpmu_msr(u32 msr_index, int *type, int *index)
         return 1;
     }
 
-    if ( (msr_index >= MSR_P6_EVNTSEL0) &&
-         (msr_index < (MSR_P6_EVNTSEL0 + core2_get_pmc_count())) )
+    if ( (msr_index >= MSR_P6_EVNTSEL(0)) &&
+         (msr_index < (MSR_P6_EVNTSEL(core2_get_pmc_count()))) )
     {
         *type = MSR_TYPE_ARCH_CTRL;
-        *index = msr_index - MSR_P6_EVNTSEL0;
+        *index = msr_index - MSR_P6_EVNTSEL(0);
         return 1;
     }
 
@@ -278,7 +278,7 @@ static void core2_vpmu_set_msr_bitmap(unsigned long *msr_bitmap)
     for ( i = 0; i < core2_ctrls.num; i++ )
         clear_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
     for ( i = 0; i < core2_get_pmc_count(); i++ )
-        clear_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL0+i), msr_bitmap);
+        clear_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL(i)), msr_bitmap);
 }
 
 static void core2_vpmu_unset_msr_bitmap(unsigned long *msr_bitmap)
@@ -308,7 +308,7 @@ static void core2_vpmu_unset_msr_bitmap(unsigned long *msr_bitmap)
     for ( i = 0; i < core2_ctrls.num; i++ )
         set_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
     for ( i = 0; i < core2_get_pmc_count(); i++ )
-        set_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL0+i), msr_bitmap);
+        set_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL(i)), msr_bitmap);
 }
 
 static inline void __core2_vpmu_save(struct vcpu *v)
@@ -359,7 +359,7 @@ static inline void __core2_vpmu_load(struct vcpu *v)
     for ( i = 0; i < core2_ctrls.num; i++ )
         wrmsrl(core2_ctrls.msr[i], core2_vpmu_cxt->ctrls[i]);
     for ( i = 0; i < core2_get_pmc_count(); i++ )
-        wrmsrl(MSR_P6_EVNTSEL0+i, core2_vpmu_cxt->arch_msr_pair[i].control);
+        wrmsrl(MSR_P6_EVNTSEL(i), core2_vpmu_cxt->arch_msr_pair[i].control);
 }
 
 static void core2_vpmu_load(struct vcpu *v)
@@ -526,7 +526,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
         global_ctrl = msr_content;
         for ( i = 0; i < core2_get_pmc_count(); i++ )
         {
-            rdmsrl(MSR_P6_EVNTSEL0+i, non_global_ctrl);
+            rdmsrl(MSR_P6_EVNTSEL(i), non_global_ctrl);
             core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i] =
                     global_ctrl & (non_global_ctrl >> 22) & 1;
             global_ctrl >>= 1;
@@ -555,7 +555,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
         }
         break;
     default:
-        tmp = msr - MSR_P6_EVNTSEL0;
+        tmp = msr - MSR_P6_EVNTSEL(0);
         vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
         if ( tmp >= 0 && tmp < core2_get_pmc_count() )
             core2_vpmu_cxt->pmu_enable->arch_pmc_enable[tmp] =
index f8f5e4a41c73001c676aeddf0183c61a3a635dc1..98c1e15fa1c723d276154e36486945d4040d13f9 100644 (file)
@@ -205,7 +205,7 @@ void disable_lapic_nmi_watchdog(void)
     case X86_VENDOR_INTEL:
         switch (boot_cpu_data.x86) {
         case 6:
-            wrmsr(MSR_P6_EVNTSEL0, 0, 0);
+            wrmsr(MSR_P6_EVNTSEL(0), 0, 0);
             break;
         case 15:
             wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
@@ -304,21 +304,21 @@ static void __pminit setup_p6_watchdog(unsigned counter)
 {
     unsigned int evntsel;
 
-    nmi_perfctr_msr = MSR_P6_PERFCTR0;
+    nmi_perfctr_msr = MSR_P6_PERFCTR(0);
 
-    clear_msr_range(MSR_P6_EVNTSEL0, 2);
-    clear_msr_range(MSR_P6_PERFCTR0, 2);
+    clear_msr_range(MSR_P6_EVNTSEL(0), 2);
+    clear_msr_range(MSR_P6_PERFCTR(0), 2);
 
     evntsel = P6_EVNTSEL_INT
         | P6_EVNTSEL_OS
         | P6_EVNTSEL_USR
         | counter;
 
-    wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
+    wrmsr(MSR_P6_EVNTSEL(0), evntsel, 0);
     write_watchdog_counter("P6_PERFCTR0");
     apic_write(APIC_LVTPC, APIC_DM_NMI);
     evntsel |= P6_EVNTSEL0_ENABLE;
-    wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
+    wrmsr(MSR_P6_EVNTSEL(0), evntsel, 0);
 }
 
 static int __pminit setup_p4_watchdog(void)
@@ -508,9 +508,9 @@ bool_t nmi_watchdog_tick(const struct cpu_user_regs *regs)
             wrmsrl(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val);
             apic_write(APIC_LVTPC, APIC_DM_NMI);
         }
-        else if ( nmi_perfctr_msr == MSR_P6_PERFCTR0 )
+        else if ( nmi_perfctr_msr == MSR_P6_PERFCTR(0) )
         {
-            rdmsrl(MSR_P6_PERFCTR0, msr_content);
+            rdmsrl(MSR_P6_PERFCTR(0), msr_content);
             if ( msr_content & (1ULL << P6_EVENT_WIDTH) )
                 watchdog_tick = 0;
 
index 8b9f3f60c4a1943f49a785663256ee4e6b1649f7..aa99e4d2bc8c4e286a939d18ff7074802b79deda 100644 (file)
@@ -64,9 +64,9 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs)
        int i;
 
        for (i = 0; i < num_counters; i++)
-               msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
+               msrs->counters[i].addr = MSR_P6_PERFCTR(i);
        for (i = 0; i < num_counters; i++)
-               msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
+               msrs->controls[i].addr = MSR_P6_EVNTSEL(i);
 }
 
 
@@ -211,11 +211,11 @@ static int ppro_is_arch_pmu_msr(u64 msr_index, int *type, int *index)
                *index = msr_index - MSR_IA32_PERFCTR0;
                return 1;
         }
-        if ( (msr_index >= MSR_P6_EVNTSEL0) &&
-            (msr_index < (MSR_P6_EVNTSEL0 + num_counters)) )
+        if ( (msr_index >= MSR_P6_EVNTSEL(0)) &&
+            (msr_index < (MSR_P6_EVNTSEL(num_counters))) )
         {
                *type = MSR_TYPE_ARCH_CTRL;
-               *index = msr_index - MSR_P6_EVNTSEL0;
+               *index = msr_index - MSR_P6_EVNTSEL(0);
                return 1;
         }
 
index da732b775da1b03207c23d425bc40923d305a4c1..2056501f8705ee6efcf79e6fd41479124d651de2 100644 (file)
 
 #define MSR_AMD64_MCx_MASK(x)          (MSR_AMD64_MC0_MASK + (x))
 
-#define MSR_P6_PERFCTR0                        0x000000c1
-#define MSR_P6_PERFCTR1                        0x000000c2
-#define MSR_P6_EVNTSEL0                        0x00000186
-#define MSR_P6_EVNTSEL1                        0x00000187
-
 /* MSRs & bits used for VMX enabling */
 #define MSR_IA32_VMX_BASIC                      0x480
 #define MSR_IA32_VMX_PINBASED_CTLS              0x481
 #define MSR_IA32_ENERGY_PERF_BIAS      0x000001b0
 
 /* Intel Model 6 */
-#define MSR_P6_EVNTSEL0                        0x00000186
-#define MSR_P6_EVNTSEL1                        0x00000187
+#define MSR_P6_PERFCTR(n)              (0x000000c1 + (n))
+#define MSR_P6_EVNTSEL(n)              (0x00000186 + (n))
 
 /* P4/Xeon+ specific */
 #define MSR_IA32_MCG_EAX               0x00000180
 #define _MSR_MISC_FEATURES_CPUID_FAULTING      0
 #define MSR_MISC_FEATURES_CPUID_FAULTING       (1ULL << _MSR_MISC_FEATURES_CPUID_FAULTING)
 
-/* Geode defined MSRs */
-#define MSR_GEODE_BUSCONT_CONF0                0x00001900
-
 #endif /* __ASM_MSR_INDEX_H */