             ((sizeof(long) != 8) && (value & EFER_LME)) ||
             (!cpu_has_nx && (value & EFER_NX)) ||
             (!cpu_has_syscall && (value & EFER_SCE)) ||
+             (!cpu_has_lmsl && (value & EFER_LMSLE)) ||
             (!cpu_has_ffxsr && (value & EFER_FFXSE)) ||
             ((value & (EFER_LME|EFER_LMA)) == EFER_LMA));
}
}
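
The clause added above makes hvm_efer_valid() reject EFER.LMSLE (Long Mode Segment Limit Enable) whenever the host probe introduced later in this patch did not find the bit writable. For reference, a minimal sketch of the bit definition the check relies on, assuming the usual msr-index.h naming (LMSLE is EFER bit 13 per the AMD APM); the definition itself is not part of this patch:

    #define _EFER_LMSLE    13                  /* Long Mode Segment Limit Enable */
    #define EFER_LMSLE     (1 << _EFER_LMSLE)  /* presumed msr-index.h definition */
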
    if ( !hvm_efer_valid(
-            ctxt.msr_efer, EFER_FFXSE | EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
+            ctxt.msr_efer,
+            EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
    {
        gdprintk(XENLOG_ERR, "HVM restore: bad EFER 0x%"PRIx64"\n",
                 ctxt.msr_efer);
    value &= ~EFER_LMA;
-    if ( !hvm_efer_valid(value, EFER_FFXSE | EFER_LME | EFER_NX | EFER_SCE) )
+    if ( !hvm_efer_valid(value,
+                         EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_NX | EFER_SCE) )
    {
        gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
                 "EFER: %"PRIx64"\n", value);
u32 svm_feature_flags;
+/* Indicates whether guests may use EFER.LMSLE. */
+bool_t cpu_has_lmsl;
+
#define set_segment_register(name, value) \
    asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
    /* Initialize core's ASID handling. */
    svm_asid_init(c);
+#ifdef __x86_64__
+    /*
+     * Check whether EFER.LMSLE can be written.
+     * Unfortunately there's no feature bit defined for this.
+     */
+    eax = read_efer();
+    edx = read_efer() >> 32;
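+    /*
+     * Attempt the write: wrmsr_safe() absorbs the #GP that a CPU without
+     * LMSLE raises on setting a reserved EFER bit, in which case the
+     * read-back is skipped and eax keeps its original LMSLE-clear value.
+     */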
+    if ( wrmsr_safe(MSR_EFER, eax | EFER_LMSLE, edx) == 0 )
+        rdmsr(MSR_EFER, eax, edx);
+    if ( eax & EFER_LMSLE )
+    {
+        if ( c == &boot_cpu_data )
+            cpu_has_lmsl = 1;
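+        /* The probe write left LMSLE set; XOR it back out to restore EFER. */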
+        wrmsr(MSR_EFER, eax ^ EFER_LMSLE, edx);
+    }
+    else
+    {
+        if ( cpu_has_lmsl )
+            printk(XENLOG_WARNING "Inconsistent LMSLE support across CPUs!\n");
+        cpu_has_lmsl = 0;
+    }
+#endif
+
    return 1;
}
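
Since AMD defines no CPUID feature flag for LMSLE, detection has to be done by attempting the EFER write itself, as above. The flag is latched only on the boot CPU; if a secondary CPU fails the probe, cpu_has_lmsl is cleared again (with a warning) so the feature is never advertised when support is not uniform across the system.
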
extern struct hvm_function_table hvm_funcs;
extern int hvm_enabled;
+extern bool_t cpu_has_lmsl;
int hvm_domain_initialise(struct domain *d);
void hvm_domain_relinquish_resources(struct domain *d);