From 8b022d0005d5b941cd078f640cae04711f5536c1 Mon Sep 17 00:00:00 2001
From: Roger Pau Monne
Date: Fri, 2 Mar 2018 16:19:29 +0000
Subject: [PATCH] vvmx: fixes after CR4 trapping optimizations
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Commit 40681735502 doesn't update the nested VMX code to take the L1
CR4 host mask into account when the nested guest (L2) writes to CR4,
so the mask written to CR4_GUEST_HOST_MASK is likely less restrictive
than it should be.

Also, the VVMCS GUEST_CR4 value should be updated to match the
underlying value when syncing the VVMCS state.

Fixes: 40681735502 ("vmx/hap: optimize CR4 trapping")
Signed-off-by: Roger Pau Monné
Reviewed-by: Sergey Dyasli
Acked-by: Kevin Tian
---
 xen/arch/x86/hvm/vmx/vmx.c  | 4 ++++
 xen/arch/x86/hvm/vmx/vvmx.c | 7 ++++++-
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 5cee364b0c..18d8ce2303 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1617,6 +1617,10 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr,
                 v->arch.hvm_vmx.cr4_host_mask |=
                     ~v->domain->arch.monitor.write_ctrlreg_mask[VM_EVENT_X86_CR4];
 
+            if ( nestedhvm_vcpu_in_guestmode(v) )
+                /* Add the nested host mask to get the more restrictive one. */
+                v->arch.hvm_vmx.cr4_host_mask |= get_vvmcs(v,
+                                                           CR4_GUEST_HOST_MASK);
         }
 
         __vmwrite(CR4_GUEST_HOST_MASK, v->arch.hvm_vmx.cr4_host_mask);
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 8176736e8f..dcd3b28f86 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1101,7 +1101,8 @@ static void load_shadow_guest_state(struct vcpu *v)
                      (get_vvmcs(v, CR4_READ_SHADOW) & cr_gh_mask);
     __vmwrite(CR4_READ_SHADOW, cr_read_shadow);
     /* Add the nested host mask to the one set by vmx_update_guest_cr. */
-    __vmwrite(CR4_GUEST_HOST_MASK, cr_gh_mask | v->arch.hvm_vmx.cr4_host_mask);
+    v->arch.hvm_vmx.cr4_host_mask |= cr_gh_mask;
+    __vmwrite(CR4_GUEST_HOST_MASK, v->arch.hvm_vmx.cr4_host_mask);
 
     /* TODO: CR3 target control */
 }
@@ -1232,6 +1233,10 @@ static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs)
     /* CR3 sync if exec doesn't want cr3 load exiting: i.e. nested EPT */
     if ( !(__n2_exec_control(v) & CPU_BASED_CR3_LOAD_EXITING) )
         shadow_to_vvmcs(v, GUEST_CR3);
+
+    if ( v->arch.hvm_vmx.cr4_host_mask != ~0UL )
+        /* Only need to update nested GUEST_CR4 if not all bits are trapped. */
+        set_vvmcs(v, GUEST_CR4, v->arch.hvm_vcpu.guest_cr[4]);
 }
 
 static void sync_vvmcs_ro(struct vcpu *v)
-- 
2.30.2
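
Side note on why the masks can simply be OR-ed together: a CR4 bit has to be
intercepted whenever either L0 or L1 wants to own it, so the union of the two
guest/host masks is the more restrictive one, as the added comment in
vmx_update_guest_cr() says. The standalone sketch below illustrates this; it
is not Xen code, and the example mask values (VMXE, PAE, PGE) are chosen
arbitrarily for the demonstration.

/*
 * Standalone illustration (not Xen code): combining the L0 and L1 CR4
 * guest/host masks with a bitwise OR yields the more restrictive mask,
 * i.e. a bit is trapped if either level wants to intercept it.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Architectural CR4 bit positions; the mask values are just an example. */
    const uint64_t cr4_pae  = 1ULL << 5;   /* CR4.PAE */
    const uint64_t cr4_pge  = 1ULL << 7;   /* CR4.PGE */
    const uint64_t cr4_vmxe = 1ULL << 13;  /* CR4.VMXE */

    /* Bits L0 (the real hypervisor) wants to trap. */
    uint64_t l0_mask = cr4_vmxe | cr4_pae;
    /* Bits L1 requested in its VVMCS CR4_GUEST_HOST_MASK for L2. */
    uint64_t l1_mask = cr4_pge;

    /* Mirrors the patch: OR the L1 mask in before writing the real mask. */
    uint64_t combined = l0_mask | l1_mask;

    printf("combined mask: %#" PRIx64 "\n", combined);
    printf("all L0 bits still trapped: %s\n",
           (combined & l0_mask) == l0_mask ? "yes" : "no");
    printf("all L1 bits still trapped: %s\n",
           (combined & l1_mask) == l1_mask ? "yes" : "no");

    return 0;
}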