From: Keir Fraser
Date: Fri, 29 Aug 2008 10:13:41 +0000 (+0100)
Subject: amd: Extended migration support
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~14113^2~7
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=2c9b12c9e4238e1d3d4fe5117bb22801188498a0;p=xen.git

amd: Extended migration support

This patch adds support for AMD's extended migration, i.e. masking of the
CPUID features and extended features.

Signed-off-by: Travis Betak
Signed-off-by: Keir Fraser
---

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 766d645f90..407ecf33bf
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -10,9 +10,143 @@
 #include
 #include "cpu.h"
+#include "amd.h"
 
 int start_svm(struct cpuinfo_x86 *c);
 
+/*
+ * Pre-canned values for overriding the CPUID features
+ * and extended features masks.
+ *
+ * Currently supported processors:
+ *
+ * "fam_0f_rev_c"
+ * "fam_0f_rev_d"
+ * "fam_0f_rev_e"
+ * "fam_0f_rev_f"
+ * "fam_0f_rev_g"
+ * "fam_10_rev_b"
+ * "fam_10_rev_c"
+ * "fam_11_rev_b"
+ */
+static char opt_famrev[14];
+string_param("cpuid_mask_cpu", opt_famrev);
+
+/* Finer-grained CPUID feature control. */
+static unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
+integer_param("cpuid_mask_ecx", opt_cpuid_mask_ecx);
+integer_param("cpuid_mask_edx", opt_cpuid_mask_edx);
+static unsigned int opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx;
+integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
+integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
+
+static inline void wrmsr_amd(unsigned int index, unsigned int lo,
+		unsigned int hi)
+{
+	asm volatile (
+		"wrmsr"
+		: /* No outputs */
+		: "c" (index), "a" (lo),
+		  "d" (hi), "D" (0x9c5a203a)
+	);
+}
+
+/*
+ * Mask the features and extended features returned by CPUID. Parameters are
+ * set from the boot line via two methods:
+ *
+ * 1) Specific processor revision string
+ * 2) User-defined masks
+ *
+ * The processor revision string parameter has precedence.
+ */
+static void __devinit set_cpuidmask(struct cpuinfo_x86 *c)
+{
+	static unsigned int feat_ecx, feat_edx;
+	static unsigned int extfeat_ecx, extfeat_edx;
+	static enum { not_parsed, no_mask, set_mask } status;
+
+	if (status == no_mask)
+		return;
+
+	if (status == set_mask)
+		goto setmask;
+
+	ASSERT((status == not_parsed) && (smp_processor_id() == 0));
+	status = no_mask;
+
+	if (opt_cpuid_mask_ecx | opt_cpuid_mask_edx |
+	    opt_cpuid_mask_ext_ecx | opt_cpuid_mask_ext_edx) {
+		feat_ecx = opt_cpuid_mask_ecx ? : ~0U;
+		feat_edx = opt_cpuid_mask_edx ? : ~0U;
+		extfeat_ecx = opt_cpuid_mask_ext_ecx ? : ~0U;
+		extfeat_edx = opt_cpuid_mask_ext_edx ? : ~0U;
+	} else if (*opt_famrev == '\0') {
+		return;
+	} else if (!strcmp(opt_famrev, "fam_0f_rev_c")) {
+		feat_ecx = AMD_FEATURES_K8_REV_C_ECX;
+		feat_edx = AMD_FEATURES_K8_REV_C_EDX;
+		extfeat_ecx = AMD_EXTFEATURES_K8_REV_C_ECX;
+		extfeat_edx = AMD_EXTFEATURES_K8_REV_C_EDX;
+	} else if (!strcmp(opt_famrev, "fam_0f_rev_d")) {
+		feat_ecx = AMD_FEATURES_K8_REV_D_ECX;
+		feat_edx = AMD_FEATURES_K8_REV_D_EDX;
+		extfeat_ecx = AMD_EXTFEATURES_K8_REV_D_ECX;
+		extfeat_edx = AMD_EXTFEATURES_K8_REV_D_EDX;
+	} else if (!strcmp(opt_famrev, "fam_0f_rev_e")) {
+		feat_ecx = AMD_FEATURES_K8_REV_E_ECX;
+		feat_edx = AMD_FEATURES_K8_REV_E_EDX;
+		extfeat_ecx = AMD_EXTFEATURES_K8_REV_E_ECX;
+		extfeat_edx = AMD_EXTFEATURES_K8_REV_E_EDX;
+	} else if (!strcmp(opt_famrev, "fam_0f_rev_f")) {
+		feat_ecx = AMD_FEATURES_K8_REV_F_ECX;
+		feat_edx = AMD_FEATURES_K8_REV_F_EDX;
+		extfeat_ecx = AMD_EXTFEATURES_K8_REV_F_ECX;
+		extfeat_edx = AMD_EXTFEATURES_K8_REV_F_EDX;
+	} else if (!strcmp(opt_famrev, "fam_0f_rev_g")) {
+		feat_ecx = AMD_FEATURES_K8_REV_G_ECX;
+		feat_edx = AMD_FEATURES_K8_REV_G_EDX;
+		extfeat_ecx = AMD_EXTFEATURES_K8_REV_G_ECX;
+		extfeat_edx = AMD_EXTFEATURES_K8_REV_G_EDX;
+	} else if (!strcmp(opt_famrev, "fam_10_rev_b")) {
+		feat_ecx = AMD_FEATURES_FAM10h_REV_B_ECX;
+		feat_edx = AMD_FEATURES_FAM10h_REV_B_EDX;
+		extfeat_ecx = AMD_EXTFEATURES_FAM10h_REV_B_ECX;
+		extfeat_edx = AMD_EXTFEATURES_FAM10h_REV_B_EDX;
+	} else if (!strcmp(opt_famrev, "fam_10_rev_c")) {
+		feat_ecx = AMD_FEATURES_FAM10h_REV_C_ECX;
+		feat_edx = AMD_FEATURES_FAM10h_REV_C_EDX;
+		extfeat_ecx = AMD_EXTFEATURES_FAM10h_REV_C_ECX;
+		extfeat_edx = AMD_EXTFEATURES_FAM10h_REV_C_EDX;
+	} else if (!strcmp(opt_famrev, "fam_11_rev_b")) {
+		feat_ecx = AMD_FEATURES_FAM11h_REV_B_ECX;
+		feat_edx = AMD_FEATURES_FAM11h_REV_B_EDX;
+		extfeat_ecx = AMD_EXTFEATURES_FAM11h_REV_B_ECX;
+		extfeat_edx = AMD_EXTFEATURES_FAM11h_REV_B_EDX;
+	} else {
+		printk("Invalid processor string: %s\n", opt_famrev);
+		printk("CPUID will not be masked\n");
+		return;
+	}
+
+	status = set_mask;
+	printk("Writing CPUID feature mask ECX:EDX -> %08Xh:%08Xh\n",
+	       feat_ecx, feat_edx);
+	printk("Writing CPUID extended feature mask ECX:EDX -> %08Xh:%08Xh\n",
+	       extfeat_ecx, extfeat_edx);
+
+ setmask:
+	/* FIXME check if processor supports CPUID masking */
+	/* AMD processors prior to family 10h required a 32-bit password */
+	if (c->x86 >= 0x10) {
+		wrmsr(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx);
+		wrmsr(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx);
+	} else if (c->x86 == 0x0f) {
+		wrmsr_amd(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx);
+		wrmsr_amd(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx);
+	}
+}
+
 /*
  * amd_flush_filter={on,off}. Forcibly Enable or disable the TLB flush
  * filter on AMD 64-bit processors.
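
For a migration pool built from dissimilar AMD hosts, the idea is that each
host boots with masks describing the pool's common feature set, either via one
of the pre-canned cpuid_mask_cpu revision strings or via raw cpuid_mask_ecx/edx
and cpuid_mask_ext_ecx/edx values. One possible way to derive the raw values is
sketched below. The sketch is illustrative only; it is not part of this patch
and the file name is made up. It prints the CPUID words that the masks act on,
optionally ANDed with the words reported by another host, so chaining it across
a pool yields the common denominator.

/* cpuid-mask-calc.c - illustrative helper, not part of this patch. */
#include <stdio.h>
#include <stdlib.h>
#include <cpuid.h>

int main(int argc, char **argv)
{
	unsigned int eax, ebx, ecx, edx, ext_ecx, ext_edx;

	/* The words this patch can mask: CPUID leaf 1 and leaf 0x80000001. */
	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx) ||
	    !__get_cpuid(0x80000001, &eax, &ebx, &ext_ecx, &ext_edx)) {
		fprintf(stderr, "required CPUID leaf not available\n");
		return 1;
	}

	/* Optionally AND in the four words printed on another host. */
	if (argc == 5) {
		ecx     &= strtoul(argv[1], NULL, 0);
		edx     &= strtoul(argv[2], NULL, 0);
		ext_ecx &= strtoul(argv[3], NULL, 0);
		ext_edx &= strtoul(argv[4], NULL, 0);
	}

	printf("0x%08x 0x%08x 0x%08x 0x%08x\n", ecx, edx, ext_ecx, ext_edx);
	return 0;
}

The four resulting values map directly onto the boot parameters registered
above: cpuid_mask_ecx, cpuid_mask_edx, cpuid_mask_ext_ecx, cpuid_mask_ext_edx.
A parameter left unspecified defaults to ~0U (all bits set), which the code
treats as "leave that word unmasked".
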
@@ -115,7 +249,7 @@ static void check_disable_c1e(unsigned int port, u8 value)
 		on_each_cpu(disable_c1e, NULL, 1, 1);
 }
 
-static void __init init_amd(struct cpuinfo_x86 *c)
+static void __devinit init_amd(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 	int mbytes = num_physpages >> (20-PAGE_SHIFT);
@@ -368,6 +502,8 @@ static void __init init_amd(struct cpuinfo_x86 *c)
 	if ((smp_processor_id() == 1) && c1_ramping_may_cause_clock_drift(c))
 		disable_c1_ramping();
 
+	set_cpuidmask(c);
+
 	start_svm(c);
 }
 
diff --git a/xen/arch/x86/cpu/amd.h b/xen/arch/x86/cpu/amd.h
new file mode 100644
index 0000000000..4da3d64a87
--- /dev/null
+++ b/xen/arch/x86/cpu/amd.h
@@ -0,0 +1,103 @@
+/*
+ * amd.h - AMD processor specific definitions
+ */
+
+#ifndef __AMD_H__
+#define __AMD_H__
+
+#include
+
+/* CPUID masks for use by AMD-V Extended Migration */
+
+#define X86_FEATURE_BITPOS(_feature_) ((_feature_) % 32)
+#define __bit(_x_) (1U << X86_FEATURE_BITPOS(_x_))
+
+/* Family 0Fh, Revision C */
+#define AMD_FEATURES_K8_REV_C_ECX  0
+#define AMD_FEATURES_K8_REV_C_EDX  ( \
+	__bit(X86_FEATURE_FPU)   | __bit(X86_FEATURE_VME)    | \
+	__bit(X86_FEATURE_DE)    | __bit(X86_FEATURE_PSE)    | \
+	__bit(X86_FEATURE_TSC)   | __bit(X86_FEATURE_MSR)    | \
+	__bit(X86_FEATURE_PAE)   | __bit(X86_FEATURE_MCE)    | \
+	__bit(X86_FEATURE_CX8)   | __bit(X86_FEATURE_APIC)   | \
+	__bit(X86_FEATURE_SEP)   | __bit(X86_FEATURE_MTRR)   | \
+	__bit(X86_FEATURE_PGE)   | __bit(X86_FEATURE_MCA)    | \
+	__bit(X86_FEATURE_CMOV)  | __bit(X86_FEATURE_PAT)    | \
+	__bit(X86_FEATURE_PSE36) | __bit(X86_FEATURE_CLFLSH) | \
+	__bit(X86_FEATURE_MMX)   | __bit(X86_FEATURE_FXSR)   | \
+	__bit(X86_FEATURE_XMM)   | __bit(X86_FEATURE_XMM2))
+#define AMD_EXTFEATURES_K8_REV_C_ECX  0
+#define AMD_EXTFEATURES_K8_REV_C_EDX  ( \
+	__bit(X86_FEATURE_FPU)      | __bit(X86_FEATURE_VME)   | \
+	__bit(X86_FEATURE_DE)       | __bit(X86_FEATURE_PSE)   | \
+	__bit(X86_FEATURE_TSC)      | __bit(X86_FEATURE_MSR)   | \
+	__bit(X86_FEATURE_PAE)      | __bit(X86_FEATURE_MCE)   | \
+	__bit(X86_FEATURE_CX8)      | __bit(X86_FEATURE_APIC)  | \
+	__bit(X86_FEATURE_SYSCALL)  | __bit(X86_FEATURE_MTRR)  | \
+	__bit(X86_FEATURE_PGE)      | __bit(X86_FEATURE_MCA)   | \
+	__bit(X86_FEATURE_CMOV)     | __bit(X86_FEATURE_PAT)   | \
+	__bit(X86_FEATURE_PSE36)    | __bit(X86_FEATURE_NX)    | \
+	__bit(X86_FEATURE_MMXEXT)   | __bit(X86_FEATURE_MMX)   | \
+	__bit(X86_FEATURE_FXSR)     | __bit(X86_FEATURE_LM)    | \
+	__bit(X86_FEATURE_3DNOWEXT) | __bit(X86_FEATURE_3DNOW))
+
+/* Family 0Fh, Revision D */
+#define AMD_FEATURES_K8_REV_D_ECX     AMD_FEATURES_K8_REV_C_ECX
+#define AMD_FEATURES_K8_REV_D_EDX     AMD_FEATURES_K8_REV_C_EDX
+#define AMD_EXTFEATURES_K8_REV_D_ECX  (AMD_EXTFEATURES_K8_REV_C_ECX | \
+	__bit(X86_FEATURE_LAHF_LM))
+#define AMD_EXTFEATURES_K8_REV_D_EDX  (AMD_EXTFEATURES_K8_REV_C_EDX | \
+	__bit(X86_FEATURE_FFXSR))
+
+/* Family 0Fh, Revision E */
+#define AMD_FEATURES_K8_REV_E_ECX     (AMD_FEATURES_K8_REV_D_ECX | \
+	__bit(X86_FEATURE_XMM3))
+#define AMD_FEATURES_K8_REV_E_EDX     (AMD_FEATURES_K8_REV_D_EDX | \
+	__bit(X86_FEATURE_HT))
+#define AMD_EXTFEATURES_K8_REV_E_ECX  (AMD_EXTFEATURES_K8_REV_D_ECX | \
+	__bit(X86_FEATURE_CMP_LEGACY))
+#define AMD_EXTFEATURES_K8_REV_E_EDX  AMD_EXTFEATURES_K8_REV_D_EDX
+
+/* Family 0Fh, Revision F */
+#define AMD_FEATURES_K8_REV_F_ECX     (AMD_FEATURES_K8_REV_E_ECX | \
+	__bit(X86_FEATURE_CX16))
+#define AMD_FEATURES_K8_REV_F_EDX     AMD_FEATURES_K8_REV_E_EDX
+#define AMD_EXTFEATURES_K8_REV_F_ECX  (AMD_EXTFEATURES_K8_REV_E_ECX | \
+	__bit(X86_FEATURE_SVME) | __bit(X86_FEATURE_EXTAPICSPACE) | \
+	__bit(X86_FEATURE_ALTMOVCR))
+#define AMD_EXTFEATURES_K8_REV_F_EDX  (AMD_EXTFEATURES_K8_REV_E_EDX | \
+	__bit(X86_FEATURE_RDTSCP))
+
+/* Family 0Fh, Revision G */
+#define AMD_FEATURES_K8_REV_G_ECX     AMD_FEATURES_K8_REV_F_ECX
+#define AMD_FEATURES_K8_REV_G_EDX     AMD_FEATURES_K8_REV_F_EDX
+#define AMD_EXTFEATURES_K8_REV_G_ECX  (AMD_EXTFEATURES_K8_REV_F_ECX | \
+	__bit(X86_FEATURE_3DNOWPF))
+#define AMD_EXTFEATURES_K8_REV_G_EDX  AMD_EXTFEATURES_K8_REV_F_EDX
+
+/* Family 10h, Revision B */
+#define AMD_FEATURES_FAM10h_REV_B_ECX  (AMD_FEATURES_K8_REV_F_ECX | \
+	__bit(X86_FEATURE_POPCNT) | __bit(X86_FEATURE_MWAIT))
+#define AMD_FEATURES_FAM10h_REV_B_EDX  AMD_FEATURES_K8_REV_F_EDX
+#define AMD_EXTFEATURES_FAM10h_REV_B_ECX (AMD_EXTFEATURES_K8_REV_F_ECX | \
+	__bit(X86_FEATURE_ABM) | __bit(X86_FEATURE_SSE4A) | \
+	__bit(X86_FEATURE_MISALIGNSSE) | __bit(X86_FEATURE_OSVW) | \
+	__bit(X86_FEATURE_IBS))
+#define AMD_EXTFEATURES_FAM10h_REV_B_EDX (AMD_EXTFEATURES_K8_REV_F_EDX | \
+	__bit(X86_FEATURE_PAGE1GB))
+
+/* Family 10h, Revision C */
+#define AMD_FEATURES_FAM10h_REV_C_ECX  AMD_FEATURES_FAM10h_REV_B_ECX
+#define AMD_FEATURES_FAM10h_REV_C_EDX  AMD_FEATURES_FAM10h_REV_B_EDX
+#define AMD_EXTFEATURES_FAM10h_REV_C_ECX (AMD_EXTFEATURES_FAM10h_REV_B_ECX | \
+	__bit(X86_FEATURE_SKINIT) | __bit(X86_FEATURE_WDT))
+#define AMD_EXTFEATURES_FAM10h_REV_C_EDX AMD_EXTFEATURES_FAM10h_REV_B_EDX
+
+/* Family 11h, Revision B */
+#define AMD_FEATURES_FAM11h_REV_B_ECX  AMD_FEATURES_K8_REV_G_ECX
+#define AMD_FEATURES_FAM11h_REV_B_EDX  AMD_FEATURES_K8_REV_G_EDX
+#define AMD_EXTFEATURES_FAM11h_REV_B_ECX (AMD_EXTFEATURES_K8_REV_G_ECX | \
+	__bit(X86_FEATURE_SKINIT))
+#define AMD_EXTFEATURES_FAM11h_REV_B_EDX AMD_EXTFEATURES_K8_REV_G_EDX
+
+#endif /* __AMD_H__ */

diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 7e2164797e..b49baf2f1a 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -187,6 +187,9 @@
 #define MSR_K8_VM_CR			0xc0010114
 #define MSR_K8_VM_HSAVE_PA		0xc0010117
 
+#define MSR_K8_FEATURE_MASK		0xc0011004
+#define MSR_K8_EXT_FEATURE_MASK		0xc0011005
+
 /* MSR_K8_VM_CR bits: */
 #define _K8_VMCR_SVME_DISABLE		4
 #define K8_VMCR_SVME_DISABLE		(1 << _K8_VMCR_SVME_DISABLE)
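
Taken together, the pieces above work as follows: set_cpuidmask() programs
MSR_K8_FEATURE_MASK with the leaf 1 masks and MSR_K8_EXT_FEATURE_MASK with the
leaf 0x80000001 masks, and the "? : ~0U" defaults suggest the patch relies on
these MSRs behaving as masks, i.e. a word of all ones leaves the corresponding
CPUID word unchanged. Under that assumption, the guest-visible feature words
and the role of the pre-canned revision tables in amd.h can be modelled by the
sketch below (illustrative only; the struct and helper names are invented and
are not part of Xen):

#include <stdbool.h>

/* The four CPUID words covered by the two mask MSRs. */
struct feature_words {
	unsigned int ecx, edx;          /* CPUID leaf 1, ECX/EDX */
	unsigned int ext_ecx, ext_edx;  /* CPUID leaf 0x80000001, ECX/EDX */
};

/* Model of the masking: what CPUID reports after set_cpuidmask() has run. */
static struct feature_words apply_mask(struct feature_words host,
				       struct feature_words mask)
{
	struct feature_words out = {
		.ecx     = host.ecx     & mask.ecx,
		.edx     = host.edx     & mask.edx,
		.ext_ecx = host.ext_ecx & mask.ext_ecx,
		.ext_edx = host.ext_edx & mask.ext_edx,
	};
	return out;
}

/*
 * A host can usefully present one of the pre-canned revisions (for example
 * cpuid_mask_cpu=fam_0f_rev_f) only if its native features are a superset of
 * that revision's mask; masking can hide bits but never add missing ones.
 */
static bool can_present(struct feature_words host, struct feature_words rev)
{
	return (host.ecx     & rev.ecx)     == rev.ecx &&
	       (host.edx     & rev.edx)     == rev.edx &&
	       (host.ext_ecx & rev.ext_ecx) == rev.ext_ecx &&
	       (host.ext_edx & rev.ext_edx) == rev.ext_edx;
}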