*/
call_policy_changed = (is_hvm_domain(d) &&
((old_7d0 ^ p->feat.raw[0].d) &
- cpufeat_mask(X86_FEATURE_IBRSB)));
+ (cpufeat_mask(X86_FEATURE_IBRSB) |
+ cpufeat_mask(X86_FEATURE_L1D_FLUSH))));
break;
case 0xa:
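
Not part of the patch: a minimal, self-contained sketch of the XOR-and-mask idiom used in the hunk above, with hypothetical macros standing in for cpufeat_mask(X86_FEATURE_IBRSB) and cpufeat_mask(X86_FEATURE_L1D_FLUSH). A set bit in (old ^ new) marks a toggled feature, so the policy recalculation now fires when either IBRSB or L1D_FLUSH changes.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for cpufeat_mask(X86_FEATURE_*); the bit positions
 * match the 9*32+26 / 9*32+28 definitions in cpufeatureset.h further down. */
#define MASK_IBRSB      (1u << 26)
#define MASK_L1D_FLUSH  (1u << 28)

/* True if any bit of interest toggled between the old and new CPUID.7[0].edx
 * values: XOR highlights every changed bit, AND keeps only the tracked ones. */
static bool feat_7d0_changed(uint32_t old_7d0, uint32_t new_7d0)
{
    return (old_7d0 ^ new_7d0) & (MASK_IBRSB | MASK_L1D_FLUSH);
}
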
vmx_clear_msr_intercept(v, MSR_PRED_CMD, VMX_MSR_RW);
else
vmx_set_msr_intercept(v, MSR_PRED_CMD, VMX_MSR_RW);
+
+ /* MSR_FLUSH_CMD is safe to pass through if the guest knows about it. */
+ if ( cp->feat.l1d_flush )
+ vmx_clear_msr_intercept(v, MSR_FLUSH_CMD, VMX_MSR_RW);
+ else
+ vmx_set_msr_intercept(v, MSR_FLUSH_CMD, VMX_MSR_RW);
}
int vmx_guest_x86_mode(struct vcpu *v)
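
For illustration only: once the intercept is cleared in vmx_cpuid_policy_changed(), a guest that sees the L1D_FLUSH CPUID bit can write IA32_FLUSH_CMD directly, with no VM exit. A hedged guest-side sketch (the 0x10b index and bit 0 come from the SDM; the helper name is made up):

#include <stdint.h>

#define MSR_FLUSH_CMD  0x0000010b   /* IA32_FLUSH_CMD */
#define FLUSH_CMD_L1D  (1ull << 0)  /* Writeback and invalidate the L1 data cache */

/* Hypothetical guest-side helper: request an L1D flush via WRMSR.  With the
 * intercept cleared above, this write goes straight to hardware. */
static inline void l1d_flush(void)
{
    uint32_t lo = (uint32_t)FLUSH_CMD_L1D, hi = (uint32_t)(FLUSH_CMD_L1D >> 32);

    asm volatile ( "wrmsr" :: "c" (MSR_FLUSH_CMD), "a" (lo), "d" (hi) : "memory" );
}
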
case MSR_AMD_PATCHLOADER:
case MSR_IA32_UCODE_WRITE:
case MSR_PRED_CMD:
+ case MSR_FLUSH_CMD:
/* Write-only */
goto gp_fault;
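
Aside, not in the patch: like MSR_PRED_CMD, MSR_FLUSH_CMD is a write-only command MSR, so the read path lumps it into the shared "Write-only" gp_fault exit. A hypothetical condensed sketch of that shape:

#include <stdint.h>

#define MSR_PRED_CMD   0x00000049   /* IA32_PRED_CMD  - write-only */
#define MSR_FLUSH_CMD  0x0000010b   /* IA32_FLUSH_CMD - write-only */

/* Command MSRs hold no readable state, so a guest RDMSR of either one is
 * answered with #GP(0).  Names and return convention here are illustrative. */
static int sketch_guest_rdmsr(uint32_t msr, uint64_t *val)
{
    switch ( msr )
    {
    case MSR_PRED_CMD:
    case MSR_FLUSH_CMD:
        return -1;              /* write-only: inject #GP(0) */

    default:
        *val = 0;
        return 0;
    }
}
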
wrmsrl(MSR_PRED_CMD, val);
break;
+ case MSR_FLUSH_CMD:
+ if ( !cp->feat.l1d_flush )
+ goto gp_fault; /* MSR available? */
+
+ if ( val & ~FLUSH_CMD_L1D )
+ goto gp_fault; /* Rsvd bit set? */
+
+ if ( v == curr )
+ wrmsrl(MSR_FLUSH_CMD, val);
+ break;
+
case MSR_INTEL_MISC_FEATURES_ENABLES:
{
bool old_cpuid_faulting = msrs->misc_features_enables.cpuid_faulting;
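
Not part of the patch: the write path above boils down to two validity checks, sketched standalone below with a hypothetical helper name. The actual wrmsrl() is additionally gated on v == curr, since the flush only makes sense on the pCPU currently running the vCPU and the MSR latches no state that would need replaying later.

#include <stdbool.h>
#include <stdint.h>

#define FLUSH_CMD_L1D  (1ull << 0)

/* Hypothetical standalone version of the checks above: the MSR must be
 * advertised to the guest, and only bit 0 (L1D) is defined - any other set
 * bit is reserved and the write must #GP. */
static bool flush_cmd_write_ok(bool guest_has_l1d_flush, uint64_t val)
{
    if ( !guest_has_l1d_flush )
        return false;           /* MSR not advertised to this guest */

    if ( val & ~FLUSH_CMD_L1D )
        return false;           /* reserved bits set */

    return true;
}
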
XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A AVX512 Multiply Accumulation Single Precision */
XEN_CPUFEATURE(IBRSB, 9*32+26) /*A IBRS and IBPB support (used by Intel) */
XEN_CPUFEATURE(STIBP, 9*32+27) /*A STIBP */
-XEN_CPUFEATURE(L1D_FLUSH, 9*32+28) /* MSR_FLUSH_CMD and L1D flush. */
+XEN_CPUFEATURE(L1D_FLUSH, 9*32+28) /*S MSR_FLUSH_CMD and L1D flush. */
XEN_CPUFEATURE(ARCH_CAPS, 9*32+29) /* IA32_ARCH_CAPABILITIES MSR */
XEN_CPUFEATURE(SSBD, 9*32+31) /*A MSR_SPEC_CTRL.SSBD available */
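
For context (not in the patch): per the annotation key at the top of cpufeatureset.h, moving L1D_FLUSH from no letter to 'S' marks it as exposable to HVM guests, while PV guests still do not see it. A guest would then probe CPUID.(EAX=7,ECX=0):EDX bit 28 before touching IA32_FLUSH_CMD; a hedged sketch:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical guest-side probe for CPUID.(EAX=7,ECX=0):EDX[28], the bit
 * published as L1D_FLUSH (9*32+28) above. */
static bool cpu_has_l1d_flush(void)
{
    uint32_t eax, ebx, ecx, edx;

    asm volatile ( "cpuid"
                   : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                   : "a" (7u), "c" (0u) );

    return edx & (1u << 28);
}
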