int rc;
if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0)
- {
- struct msr_domain_policy *dp = &raw_msr_domain_policy;
-
- dp->plaform_info.available = true;
- if (val & MSR_PLATFORM_INFO_CPUID_FAULTING)
- dp->plaform_info.cpuid_faulting = true;
- }
+ raw_msr_domain_policy.plaform_info.cpuid_faulting =
+ val & MSR_PLATFORM_INFO_CPUID_FAULTING;
if (rc ||
!(val & MSR_PLATFORM_INFO_CPUID_FAULTING) ||
static void __init calculate_hvm_max_policy(void)
{
struct msr_domain_policy *dp = &hvm_max_msr_domain_policy;
- struct msr_vcpu_policy *vp = &hvm_max_msr_vcpu_policy;
if ( !hvm_enabled )
return;
*dp = host_msr_domain_policy;
- /* 0x000000ce MSR_INTEL_PLATFORM_INFO */
/* It's always possible to emulate CPUID faulting for HVM guests */
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
- boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
- {
- dp->plaform_info.available = true;
- dp->plaform_info.cpuid_faulting = true;
- }
-
- /* 0x00000140 MSR_INTEL_MISC_FEATURES_ENABLES */
- vp->misc_features_enables.available = dp->plaform_info.cpuid_faulting;
+ dp->plaform_info.cpuid_faulting = true;
}
static void __init calculate_pv_max_policy(void)
{
struct msr_domain_policy *dp = &pv_max_msr_domain_policy;
- struct msr_vcpu_policy *vp = &pv_max_msr_vcpu_policy;
*dp = host_msr_domain_policy;
-
- /* 0x00000140 MSR_INTEL_MISC_FEATURES_ENABLES */
- vp->misc_features_enables.available = dp->plaform_info.cpuid_faulting;
}
void __init init_guest_msr_policy(void)
/* See comment in intel_ctxt_switch_levelling() */
if ( is_control_domain(d) )
- {
- dp->plaform_info.available = false;
dp->plaform_info.cpuid_faulting = false;
- }
d->arch.msr = dp;
*vp = is_pv_domain(d) ? pv_max_msr_vcpu_policy :
hvm_max_msr_vcpu_policy;
- /* See comment in intel_ctxt_switch_levelling() */
- if ( is_control_domain(d) )
- vp->misc_features_enables.available = false;
-
v->arch.msr = vp;
return 0;
break;
case MSR_INTEL_PLATFORM_INFO:
- if ( !dp->plaform_info.available )
- goto gp_fault;
*val = (uint64_t)dp->plaform_info.cpuid_faulting <<
_MSR_PLATFORM_INFO_CPUID_FAULTING;
break;
goto gp_fault;
case MSR_INTEL_MISC_FEATURES_ENABLES:
- if ( !vp->misc_features_enables.available )
- goto gp_fault;
*val = (uint64_t)vp->misc_features_enables.cpuid_faulting <<
_MSR_MISC_FEATURES_CPUID_FAULTING;
break;
{
bool old_cpuid_faulting = vp->misc_features_enables.cpuid_faulting;
- if ( !vp->misc_features_enables.available )
- goto gp_fault;
-
rsvd = ~0ull;
if ( dp->plaform_info.cpuid_faulting )
rsvd &= ~MSR_MISC_FEATURES_CPUID_FAULTING;
/* MSR policy object for shared per-domain MSRs */
struct msr_domain_policy
{
- /* 0x000000ce MSR_INTEL_PLATFORM_INFO */
+ /*
+ * 0x000000ce - MSR_INTEL_PLATFORM_INFO
+ *
+ * This MSR is non-architectural, but for simplicity we allow it to be read
+ * unconditionally. CPUID Faulting support can be fully emulated for HVM
+ * guests, so it can be offered unconditionally, while support for PV guests
+ * depends on real hardware support.
+ */
struct {
- bool available; /* This MSR is non-architectural */
bool cpuid_faulting;
} plaform_info;
};
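
As a rough illustration of the comment above, here is a standalone sketch (not Xen code) of what a guest read of MSR_INTEL_PLATFORM_INFO now produces: the value is built purely from the policy's cpuid_faulting flag, with no availability check in the way. The bit position (31) follows Intel's enumeration of CPUID Faulting in this MSR, and the struct is a trimmed stand-in for the real msr_domain_policy.

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define _MSR_PLATFORM_INFO_CPUID_FAULTING  31
    #define MSR_PLATFORM_INFO_CPUID_FAULTING \
        (1ULL << _MSR_PLATFORM_INFO_CPUID_FAULTING)

    /* Trimmed stand-in for the real msr_domain_policy. */
    struct msr_domain_policy {
        struct {
            bool cpuid_faulting;
        } plaform_info;
    };

    /*
     * What a guest read of 0x000000ce now returns: the read always
     * succeeds, and only the CPUID Faulting bit varies with the policy.
     */
    static uint64_t read_platform_info(const struct msr_domain_policy *dp)
    {
        return (uint64_t)dp->plaform_info.cpuid_faulting <<
            _MSR_PLATFORM_INFO_CPUID_FAULTING;
    }

    int main(void)
    {
        struct msr_domain_policy hvm = { .plaform_info = { .cpuid_faulting = true  } };
        struct msr_domain_policy ctl = { .plaform_info = { .cpuid_faulting = false } };

        printf("HVM guest:      %#018" PRIx64 "\n", read_platform_info(&hvm));
        printf("control domain: %#018" PRIx64 "\n", read_platform_info(&ctl));
        return 0;
    }
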
uint32_t raw;
} spec_ctrl;
- /* 0x00000140 MSR_INTEL_MISC_FEATURES_ENABLES */
+ /*
+ * 0x00000140 - MSR_INTEL_MISC_FEATURES_ENABLES
+ *
+ * This MSR is non-architectural, but for simplicity we allow it to be read
+ * unconditionally. The CPUID Faulting bit is the only writeable bit, and
+ * then only when CPUID Faulting is enumerated by MSR_PLATFORM_INFO.
+ */
struct {
- bool available; /* This MSR is non-architectural */
bool cpuid_faulting;
} misc_features_enables;
};
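
To make the write-side behaviour described in that comment concrete, here is a standalone sketch (again not Xen code) of the reserved-bit check applied to guest writes of MSR_INTEL_MISC_FEATURES_ENABLES, mirroring the rsvd logic in the hunk above. The bit position (0) follows Intel's enumeration; the helper name and the asserts are illustrative only.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define _MSR_MISC_FEATURES_CPUID_FAULTING  0
    #define MSR_MISC_FEATURES_CPUID_FAULTING \
        (1ULL << _MSR_MISC_FEATURES_CPUID_FAULTING)

    /*
     * All bits are reserved unless MSR_PLATFORM_INFO enumerates CPUID
     * Faulting, in which case bit 0 alone becomes writeable.
     */
    static bool misc_features_write_ok(bool platform_cpuid_faulting, uint64_t val)
    {
        uint64_t rsvd = ~0ull;

        if ( platform_cpuid_faulting )
            rsvd &= ~MSR_MISC_FEATURES_CPUID_FAULTING;

        return !(val & rsvd);   /* A set reserved bit means #GP fault. */
    }

    int main(void)
    {
        /* Enabling faulting is accepted only when the platform enumerates it. */
        assert(misc_features_write_ok(true, MSR_MISC_FEATURES_CPUID_FAULTING));
        assert(!misc_features_write_ok(false, MSR_MISC_FEATURES_CPUID_FAULTING));
        /* Writing zero is always acceptable. */
        assert(misc_features_write_ok(false, 0));
        return 0;
    }
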