static DEFINE_PER_CPU(bool_t, vmxon);

static u32 vmcs_revision_id __read_mostly;
+u64 __read_mostly vmx_basic_msr;

static void __init vmx_display_features(void)
{
        vmx_vmexit_control         = _vmx_vmexit_control;
        vmx_vmentry_control        = _vmx_vmentry_control;
        cpu_has_vmx_ins_outs_instr_info = !!(vmx_basic_msr_high & (1U<<22));
+        vmx_basic_msr = ((u64)vmx_basic_msr_high << 32) |
+                        vmx_basic_msr_low;
        vmx_display_features();
    }
    else
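
For reference, the caching added above just glues the two 32-bit RDMSR halves back together so that later code can test IA32_VMX_BASIC bits directly. A minimal standalone sketch of that pattern (not part of the patch; stdint types stand in for Xen's u32/u64, and the helper names are made up here):

#include <stdbool.h>
#include <stdint.h>

/* Illustration only; not part of the patch. */
#define VMX_BASIC_DEFAULT1_ZERO (1ULL << 55)

/* RDMSR returns the MSR in EDX:EAX; fold the halves into one u64. */
static uint64_t combine_msr_halves(uint32_t lo, uint32_t hi)
{
    return ((uint64_t)hi << 32) | lo;
}

/* Bit 55 of IA32_VMX_BASIC set => the TRUE_*_CTLS MSRs exist (per SDM). */
static bool has_true_ctls_msrs(uint64_t basic)
{
    return (basic & VMX_BASIC_DEFAULT1_ZERO) != 0;
}
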
int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
{
    struct vcpu *v = current;
+    unsigned int ecx, dummy;
    u64 data = 0, host_data = 0;
    int r = 1;

    if ( !nestedhvm_enabled(v->domain) )
        return 0;

+    /* VMX capability MSRs are available only when the guest supports VMX. */
+    hvm_cpuid(0x1, &dummy, &dummy, &ecx, &dummy);
+    if ( !(ecx & cpufeat_mask(X86_FEATURE_VMXE)) )
+        return 0;
+
+    /*
+     * These MSRs are available only when bit 55 of
+     * MSR_IA32_VMX_BASIC is set.
+     */
+    switch ( msr )
+    {
+    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
+    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+        if ( !(vmx_basic_msr & VMX_BASIC_DEFAULT1_ZERO) )
+            return 0;
+        break;
+    }
+
    rdmsrl(msr, host_data);

    /*
     */
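
The two gates added to nvmx_msr_read_intercept() compose as below. This is an illustration only, not Xen code: the MSR indices are the ones the Intel SDM assigns to the TRUE capability MSRs, and guest_has_vmxe stands in for the hvm_cpuid() VMXE check performed in the patch.

#include <stdbool.h>
#include <stdint.h>

/* Illustration only; TRUE capability MSR indices per the Intel SDM. */
#define MSR_IA32_VMX_TRUE_PINBASED_CTLS  0x48d
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x48e
#define MSR_IA32_VMX_TRUE_EXIT_CTLS      0x48f
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS     0x490

#define VMX_BASIC_DEFAULT1_ZERO (1ULL << 55)

/*
 * Should a read of 'msr' be serviced for this guest?  First gate on
 * the guest's CPUID VMXE bit, then hide the TRUE_* MSRs unless bit 55
 * of the cached IA32_VMX_BASIC value says they exist.
 */
static bool vmx_cap_msr_visible(unsigned int msr, bool guest_has_vmxe,
                                uint64_t basic_msr)
{
    if ( !guest_has_vmxe )
        return false;

    switch ( msr )
    {
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
        return (basic_msr & VMX_BASIC_DEFAULT1_ZERO) != 0;
    }
    return true;
}
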
#define VMX_BASIC_DEFAULT1_ZERO (1ULL << 55)
+extern u64 vmx_basic_msr;
+
/* Guest interrupt status */
#define VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK 0x0FF
#define VMX_GUEST_INTR_STATUS_SVI_OFFSET 8
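
As an aside on the surrounding context lines: the guest interrupt status VMCS field is 16 bits wide, with RVI in the low byte and SVI in the high byte, which is what the two masks above encode. A hypothetical accessor pair (not in the patch) would look like:

#include <stdint.h>

/* Illustration only; masks copied from the header hunk above. */
#define VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK 0x0FF
#define VMX_GUEST_INTR_STATUS_SVI_OFFSET       8

/* Requesting virtual interrupt (RVI): low byte of the field. */
static uint8_t intr_status_rvi(uint16_t status)
{
    return status & VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
}

/* Servicing virtual interrupt (SVI): high byte of the field. */
static uint8_t intr_status_svi(uint16_t status)
{
    return (status >> VMX_GUEST_INTR_STATUS_SVI_OFFSET) &
           VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
}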