Because of a bug introduced in 2010, LMSL support isn't available to guests.
c/s f2c608444 noticed the issue, but opted not to fix it, citing migration
concerns.  Beyond migration, the emulator's segmentation logic would also
need changes before the feature could be enabled.

This feature is entirely unused by operating systems (probably owing to its
semantics, which only cover half of the segment registers), and no one has
commented on its absence from Xen.  As supporting it would involve a large
amount of effort, it seems better to remove the code entirely.

If someone finds a valid usecase, we can resurrect the code and implement
the remaining parts, but I doubt anyone will.
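
For reference, enabling the feature from a guest would only have required
setting EFER.LMSLE via WRMSR.  A minimal sketch, assuming an AMD CPU with
working LMSL support (rdmsr64()/wrmsr64() are illustrative helpers, not
Xen code):

    #include <stdint.h>

    #define MSR_EFER   0xc0000080u  /* architectural EFER MSR index */
    #define EFER_LMSLE (1u << 13)   /* AMD: long-mode segment limit enable */

    static inline uint64_t rdmsr64(uint32_t msr)
    {
        uint32_t lo, hi;
        asm volatile ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );
        return ((uint64_t)hi << 32) | lo;
    }

    static inline void wrmsr64(uint32_t msr, uint64_t val)
    {
        asm volatile ( "wrmsr" :: "c" (msr),
                       "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)) );
    }

    static void enable_lmsl(void)
    {
        /* CPL0 only; WRMSR faults with #GP if the bit is unsupported. */
        wrmsr64(MSR_EFER, rdmsr64(MSR_EFER) | EFER_LMSLE);
    }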
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
if ( (value & EFER_SVME) && (!p->extd.svm || !nestedhvm_enabled(d)) )
return "SVME without nested virt";
- if ( (value & EFER_LMSLE) && !cpu_has_lmsl )
- return "LMSLE without support";
-
if ( (value & EFER_FFXSE) && !p->extd.ffxsr )
return "FFXSE without feature";
u32 svm_feature_flags;
-/* Indicates whether guests may use EFER.LMSLE. */
-bool_t cpu_has_lmsl;
-
static void svm_update_guest_efer(struct vcpu *);
static struct hvm_function_table svm_function_table;
/* Initialize core's ASID handling. */
svm_asid_init(c);
- /*
- * Check whether EFER.LMSLE can be written.
- * Unfortunately there's no feature bit defined for this.
- */
- msr_content = read_efer();
- if ( wrmsr_safe(MSR_EFER, msr_content | EFER_LMSLE) == 0 )
- rdmsrl(MSR_EFER, msr_content);
- if ( msr_content & EFER_LMSLE )
- {
- if ( 0 && /* FIXME: Migration! */ bsp )
- cpu_has_lmsl = 1;
- wrmsrl(MSR_EFER, msr_content ^ EFER_LMSLE);
- }
- else
- {
- if ( cpu_has_lmsl )
- printk(XENLOG_WARNING "Inconsistent LMSLE support across CPUs!\n");
- cpu_has_lmsl = 0;
- }
-
/* Initialize OSVW bits to be used by guests */
svm_host_osvw_init();
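
The deleted block above relied on a common pattern for features that lack a
CPUID bit: tentatively set the flag with wrmsr_safe(), read the register
back to see whether the bit stuck, and restore the original value.  A
generic sketch of that probe, using the same rdmsrl()/wrmsrl()/wrmsr_safe()
helpers (msr_bit_sticks() is an illustrative name):

    /* Probe whether 'bit' in 'msr' is writable when no feature flag
     * exists.  Leaves the MSR as it was found. */
    static bool msr_bit_sticks(unsigned int msr, uint64_t bit)
    {
        uint64_t old, val;

        rdmsrl(msr, old);
        if ( wrmsr_safe(msr, old | bit) != 0 )
            return false;               /* WRMSR faulted: not supported */

        rdmsrl(msr, val);
        wrmsrl(msr, old);               /* put the original value back */

        return val & bit;
    }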
* vendor-dependent behaviour.
*/
if ( is_pv_32bit_domain(currd) )
- *val &= ~(EFER_LME | EFER_LMA | EFER_LMSLE |
+ *val &= ~(EFER_LME | EFER_LMA |
(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL
? EFER_SCE : 0));
return X86EMUL_OKAY;
extern struct hvm_function_table hvm_funcs;
extern bool_t hvm_enabled;
-extern bool_t cpu_has_lmsl;
extern s8 hvm_port80_allowed;
extern const struct hvm_function_table *start_svm(void);
#define _EFER_LMA 10 /* Long mode active (read-only) */
#define _EFER_NX 11 /* No execute enable */
#define _EFER_SVME 12 /* AMD: SVM enable */
-#define _EFER_LMSLE 13 /* AMD: Long-mode segment limit enable */
#define _EFER_FFXSE 14 /* AMD: Fast FXSAVE/FXRSTOR enable */
#define EFER_SCE (1<<_EFER_SCE)
#define EFER_LMA (1<<_EFER_LMA)
#define EFER_NX (1<<_EFER_NX)
#define EFER_SVME (1<<_EFER_SVME)
-#define EFER_LMSLE (1<<_EFER_LMSLE)
#define EFER_FFXSE (1<<_EFER_FFXSE)
#define EFER_KNOWN_MASK (EFER_SCE | EFER_LME | EFER_LMA | EFER_NX | \
- EFER_SVME | EFER_LMSLE | EFER_FFXSE)
+ EFER_SVME | EFER_FFXSE)
/* Speculation Controls. */
#define MSR_SPEC_CTRL 0x00000048