__vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
__vmwrite(CR4_GUEST_HOST_MASK, ~0UL);
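+ /*
+ * Keep a cached copy of the CR4 guest/host mask in sync with the value
+ * loaded into the VMCS; it is consulted when syncing guest_cr[4] from
+ * hardware and by the nested VMX code.
+ */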
+ v->arch.hvm_vmx.cr4_host_mask = ~0UL;
__vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
__vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
}
__vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
+
+ if ( !paging_mode_hap(v->domain) )
+ /*
+ * Shadow path has not been optimized because it requires
+ * unconditionally trapping more CR4 bits, at which point the
+ * performance benefit of doing this is quite dubious.
+ */
+ v->arch.hvm_vmx.cr4_host_mask = ~0UL;
+ else
+ {
+ /*
+ * Update the CR4 host mask so that vmexits are only taken when the
+ * guest attempts to modify bits that are controlled by the hypervisor.
+ */
+ v->arch.hvm_vmx.cr4_host_mask = HVM_CR4_HOST_MASK | X86_CR4_PKE |
+ ~hvm_cr4_guest_valid_bits(v, 0);
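+ /*
+ * Bits whose hardware value can diverge from the guest's view (VME while
+ * emulating real mode, PSE/SMEP/SMAP while guest paging is disabled) must
+ * also remain under host control.
+ */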
+ v->arch.hvm_vmx.cr4_host_mask |= v->arch.hvm_vmx.vmx_realmode ?
+ X86_CR4_VME : 0;
+ v->arch.hvm_vmx.cr4_host_mask |= !hvm_paging_enabled(v) ?
+ (X86_CR4_PSE | X86_CR4_SMEP |
+ X86_CR4_SMAP)
+ : 0;
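+ /*
+ * Keep under host control any bits whose changes a monitor subscriber
+ * has asked to be notified about, so that writes to them still trap.
+ */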
+ if ( v->domain->arch.monitor.write_ctrlreg_enabled &
+ monitor_ctrlreg_bitmask(VM_EVENT_X86_CR4) )
+ v->arch.hvm_vmx.cr4_host_mask |=
+ ~v->domain->arch.monitor.write_ctrlreg_mask[VM_EVENT_X86_CR4];
+ }
+ __vmwrite(CR4_GUEST_HOST_MASK, v->arch.hvm_vmx.cr4_host_mask);
+
break;
case 2:
if ( paging_mode_hap(v->domain) )
{
+ /*
+ * Xen allows the guest to modify some CR4 bits directly; update the
+ * cached values to match.
+ */
+ __vmread(GUEST_CR4, &v->arch.hvm_vcpu.hw_cr[4]);
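+ /*
+ * Bits covered by cr4_host_mask are trapped and hence already up to date
+ * in guest_cr[4]; the remaining bits can be changed by the guest without
+ * a vmexit and must be refreshed from the hardware value.
+ */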
+ v->arch.hvm_vcpu.guest_cr[4] &= v->arch.hvm_vmx.cr4_host_mask;
+ v->arch.hvm_vcpu.guest_cr[4] |= v->arch.hvm_vcpu.hw_cr[4] &
+ ~v->arch.hvm_vmx.cr4_host_mask;
+
__vmread(GUEST_CR3, &v->arch.hvm_vcpu.hw_cr[3]);
if ( vmx_unrestricted_guest(v) || hvm_paging_enabled(v) )
v->arch.hvm_vcpu.guest_cr[3] = v->arch.hvm_vcpu.hw_cr[3];
cr_read_shadow = (get_vvmcs(v, GUEST_CR4) & ~cr_gh_mask) |
(get_vvmcs(v, CR4_READ_SHADOW) & cr_gh_mask);
__vmwrite(CR4_READ_SHADOW, cr_read_shadow);
+ /* Add the nested host mask to the one set by vmx_update_guest_cr. */
+ __vmwrite(CR4_GUEST_HOST_MASK, cr_gh_mask | v->arch.hvm_vmx.cr4_host_mask);
/* TODO: CR3 target control */
}
ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;
}
- if ( VM_EVENT_X86_CR3 == mop->u.mov_to_cr.index )
+ if ( VM_EVENT_X86_CR3 == mop->u.mov_to_cr.index ||
+ VM_EVENT_X86_CR4 == mop->u.mov_to_cr.index )
{
struct vcpu *v;
- /* Latches new CR3 mask through CR0 code. */
+ /* Latches new CR3 or CR4 mask through CR0 code. */
for_each_vcpu ( d, v )
hvm_update_guest_cr(v, 0);
}