                    *ebx = _eax + _ebx;
            }
        }
+        if ( count == 1 )
+        {
+            if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
+            {
+                *ebx = XSTATE_AREA_MIN_SIZE;
+                if ( v->arch.xcr0 | v->arch.hvm_vcpu.msr_xss )
+                    for ( sub_leaf = 2; sub_leaf < 63; sub_leaf++ )
+                        if ( (v->arch.xcr0 | v->arch.hvm_vcpu.msr_xss) &
+                             (1ULL << sub_leaf) )
+                            *ebx += xstate_sizes[sub_leaf];
+            }
+            else
+                *ebx = *ecx = *edx = 0;
+        }
        break;
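
The hunk above computes what a guest reads from CPUID.(EAX=0Dh,ECX=1).EBX: the size of a compacted XSAVE area holding every state component enabled in either XCR0 or IA32_XSS. A minimal standalone sketch of that computation, assuming a caller-supplied sizes[] table in place of Xen's xstate_sizes[] (compacted_xsave_size is an illustrative name, not Xen's):

#include <stdint.h>

/* 512-byte legacy FXSAVE region plus the 64-byte XSAVE header. */
#define XSTATE_AREA_MIN_SIZE (512 + 64)

/* Components 0 (x87) and 1 (SSE) live inside the legacy region, so only
 * components 2..62 add space on top of the minimum. */
static uint32_t compacted_xsave_size(uint64_t xcr0_xss_union,
                                     const unsigned int sizes[63])
{
    uint32_t total = XSTATE_AREA_MIN_SIZE;
    unsigned int i;

    for ( i = 2; i < 63; i++ )
        if ( xcr0_xss_union & (1ULL << i) )
            total += sizes[i];

    return total;
}

For example, with only x87, SSE and AVX enabled (mask 0x7), the result is 576 + sizes[2]: the legacy area, the header, and the AVX component (256 bytes on current hardware).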
    case MSR_EFER:
        *msr_content = v->arch.hvm_vcpu.guest_efer;
        break;

+    case MSR_IA32_XSS:
+        if ( !cpu_has_xsaves )
+            goto gp_fault;
+        *msr_content = v->arch.hvm_vcpu.msr_xss;
+        break;
+
    case MSR_IA32_TSC:
        *msr_content = _hvm_rdtsc_intercept();
        break;
    case MSR_EFER:
        if ( hvm_set_efer(msr_content) )
            return X86EMUL_EXCEPTION;
        break;

+    case MSR_IA32_XSS:
+        /* No XSS features currently supported for guests. */
+        if ( !cpu_has_xsaves || msr_content != 0 )
+            goto gp_fault;
+        v->arch.hvm_vcpu.msr_xss = msr_content;
+        break;
+
    case MSR_IA32_TSC:
        hvm_set_guest_tsc(v, msr_content);
        break;
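
Read together, the two intercepts give IA32_XSS simple guest-visible semantics: reads return a per-vCPU shadow value, and, because no supervisor state components are exposed to guests yet, any write of a non-zero value raises #GP. A self-contained sketch of that behaviour, with invented struct and helper names standing in for Xen's vcpu state and gp_fault path:

#include <stdbool.h>
#include <stdint.h>

struct xss_shadow { uint64_t msr_xss; };

/* Returns false where the intercepts above would take the gp_fault path. */
static bool xss_read(const struct xss_shadow *v, bool has_xsaves,
                     uint64_t *val)
{
    if ( !has_xsaves )
        return false;              /* MSR not architecturally present */
    *val = v->msr_xss;
    return true;
}

static bool xss_write(struct xss_shadow *v, bool has_xsaves, uint64_t val)
{
    /* No XSS-managed components are offered, so only zero is accepted. */
    if ( !has_xsaves || val != 0 )
        return false;
    v->msr_xss = val;
    return true;
}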
               SECONDARY_EXEC_PAUSE_LOOP_EXITING |
               SECONDARY_EXEC_ENABLE_INVPCID |
               SECONDARY_EXEC_ENABLE_VM_FUNCTIONS |
-               SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS);
+               SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS |
+               SECONDARY_EXEC_XSAVES);
        rdmsrl(MSR_IA32_VMX_MISC, _vmx_misc_cap);
        if ( _vmx_misc_cap & VMX_MISC_VMWRITE_ALL )
            opt |= SECONDARY_EXEC_ENABLE_VMCS_SHADOWING;
        __vmwrite(HOST_PAT, host_pat);
        __vmwrite(GUEST_PAT, guest_pat);
    }
+    if ( cpu_has_vmx_xsaves )
+        __vmwrite(XSS_EXIT_BITMAP, 0);

    vmx_vmcs_exit(v);
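
Writing 0 to XSS_EXIT_BITMAP deliberately turns all XSAVES/XRSTORS exiting off. Per the SDM, these instructions cause a VM exit only when the logical AND of EDX:EAX, the IA32_XSS MSR, and the XSS-exiting bitmap is non-zero, so an all-zero bitmap makes such an exit unreachable (which is why the handlers further down treat one as a bug). A one-line sketch of the exit condition:

#include <stdbool.h>
#include <stdint.h>

/* XSAVES/XRSTORS exit iff (EDX:EAX & IA32_XSS & XSS-exiting bitmap) != 0,
 * so a zero bitmap can never trigger the exit. */
static bool xsaves_would_exit(uint64_t edx_eax, uint64_t ia32_xss,
                              uint64_t xss_exit_bitmap)
{
    return (edx_eax & ia32_xss & xss_exit_bitmap) != 0;
}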
static unsigned int __init vmx_init_msr(void)
{
-    return !!cpu_has_mpx;
+    return !!cpu_has_mpx + !!cpu_has_xsaves;
}
static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
    }

    vmx_vmcs_exit(v);
+
+    if ( cpu_has_xsaves )
+    {
+        ctxt->msr[ctxt->count].val = v->arch.hvm_vcpu.msr_xss;
+        if ( ctxt->msr[ctxt->count].val )
+            ctxt->msr[ctxt->count++].index = MSR_IA32_XSS;
+    }
}
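
The save path uses a stage-then-claim idiom: the value is written into the next free record first, and the slot is only claimed (ctxt->count incremented) when the value is non-zero, so MSRs still at their default value never enter the saved context. A generic sketch of the idiom, with illustrative types standing in for Xen's hvm_msr record:

#include <stdint.h>

struct msr_rec { uint32_t index; uint64_t val; };

/* Stage val into recs[count]; claim the slot only if val is non-zero.
 * Returns the possibly-incremented record count. */
static unsigned int stage_msr(struct msr_rec *recs, unsigned int count,
                              uint32_t index, uint64_t val)
{
    recs[count].val = val;
    if ( recs[count].val )
        recs[count++].index = index;
    return count;
}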
static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
            else
                err = -ENXIO;
            break;
+        case MSR_IA32_XSS:
+            if ( cpu_has_xsaves )
+                v->arch.hvm_vcpu.msr_xss = ctxt->msr[i].val;
+            else
+                err = -ENXIO;
+            break;
        default:
            continue;
        }
    }
}
+static void vmx_handle_xsaves(void)
+{
+    gdprintk(XENLOG_ERR, "xsaves should not cause vmexit\n");
+    domain_crash(current->domain);
+}
+
+static void vmx_handle_xrstors(void)
+{
+    gdprintk(XENLOG_ERR, "xrstors should not cause vmexit\n");
+    domain_crash(current->domain);
+}
+
static int vmx_handle_apic_write(void)
{
    unsigned long exit_qualification;
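
The two handlers above are pure backstops: with the XSS-exiting bitmap programmed to zero in construct_vmcs(), neither EXIT_REASON_XSAVES nor EXIT_REASON_XRSTORS should ever be raised, so reaching one indicates a hypervisor or hardware bug and only the offending domain is torn down. The same defensive pattern as a standalone sketch, with stdio/abort standing in for Xen's gdprintk()/domain_crash():

#include <stdio.h>
#include <stdlib.h>

/* Backstop for an event the configuration makes impossible: log loudly,
 * then take down only the faulty component (Xen crashes the domain; a
 * plain process can only abort). */
static void handle_impossible_exit(const char *insn)
{
    fprintf(stderr, "%s should not cause a VM exit\n", insn);
    abort();
}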
        vmx_vcpu_flush_pml_buffer(v);
        break;
+    case EXIT_REASON_XSAVES:
+        vmx_handle_xsaves();
+        break;
+
+    case EXIT_REASON_XRSTORS:
+        vmx_handle_xrstors();
+        break;
+
    case EXIT_REASON_ACCESS_GDTR_OR_IDTR:
    case EXIT_REASON_ACCESS_LDTR_OR_TR:
    case EXIT_REASON_VMX_PREEMPTION_TIMER_EXPIRED:
u64 __read_mostly xfeature_mask;
static unsigned int *__read_mostly xstate_offsets;
-static unsigned int *__read_mostly xstate_sizes;
+unsigned int *__read_mostly xstate_sizes;
static unsigned int __read_mostly xstate_features;
static unsigned int __read_mostly xstate_comp_offsets[sizeof(xfeature_mask)*8];
#define SECONDARY_EXEC_ENABLE_VMCS_SHADOWING    0x00004000
#define SECONDARY_EXEC_ENABLE_PML               0x00020000
#define SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS   0x00040000
+#define SECONDARY_EXEC_XSAVES                   0x00100000
extern u32 vmx_secondary_exec_control;
#define VMX_EPT_EXEC_ONLY_SUPPORTED 0x00000001
#define cpu_has_vmx_virt_exceptions \
    (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS)
#define cpu_has_vmx_pml \
    (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_PML)
+#define cpu_has_vmx_xsaves \
+    (vmx_secondary_exec_control & SECONDARY_EXEC_XSAVES)
#define VMCS_RID_TYPE_MASK 0x80000000
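
The new cpu_has_vmx_xsaves predicate tests the secondary-execution-control value that vmx_init_vmcs_config() derived from the VMX capability MSRs. As a sketch of where that bit ultimately comes from: per the SDM, the upper 32 bits of IA32_VMX_PROCBASED_CTLS2 (MSR 0x48B) report the allowed-1 settings of the secondary controls, so the "enable XSAVES/XRSTORS" control (bit 20) may only be used if it is set there. An illustrative helper, not Xen code:

#include <stdbool.h>
#include <stdint.h>

#define SECONDARY_EXEC_XSAVES 0x00100000

static bool vmx_cap_allows_xsaves(uint64_t ia32_vmx_procbased_ctls2)
{
    uint32_t allowed_1 = ia32_vmx_procbased_ctls2 >> 32;   /* bits 63:32 */

    return (allowed_1 & SECONDARY_EXEC_XSAVES) != 0;
}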
    VMREAD_BITMAP                   = 0x00002026,
    VMWRITE_BITMAP                  = 0x00002028,
    VIRT_EXCEPTION_INFO             = 0x0000202a,
+    XSS_EXIT_BITMAP                 = 0x0000202c,
    GUEST_PHYSICAL_ADDRESS          = 0x00002400,
    VMCS_LINK_POINTER               = 0x00002800,
    GUEST_IA32_DEBUGCTL             = 0x00002802,
#define EXIT_REASON_INVPCID             58
#define EXIT_REASON_VMFUNC              59
#define EXIT_REASON_PML_FULL            62
+#define EXIT_REASON_XSAVES              63
+#define EXIT_REASON_XRSTORS             64

/*
 * Interruption-information format
#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
extern u64 xfeature_mask;
+extern unsigned int *xstate_sizes;
/* extended state save area */
struct __packed __attribute__((aligned (64))) xsave_struct