From: Andrew Cooper
Date: Wed, 2 Nov 2016 14:36:49 +0000 (+0000)
Subject: x86/hvm: Don't raise #GP behind the emulator's back for MSR accesses
X-Git-Tag: archive/raspbian/4.11.1-1+rpi1~1^2~66^2~2728
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=49de10f3c1718bb952f4b075d07f37eb9f605b2b;p=xen.git

x86/hvm: Don't raise #GP behind the emulator's back for MSR accesses

The current hvm_msr_{read,write}_intercept() infrastructure calls
hvm_inject_hw_exception() directly to latch a fault, and returns
X86EMUL_EXCEPTION to its caller.

This behaviour is problematic for the hvmemul_{read,write}_msr() paths, as
the fault is raised behind the back of the x86 emulator.

Alter the behaviour so hvm_msr_{read,write}_intercept() simply returns
X86EMUL_EXCEPTION, leaving the callers to actually inject the #GP fault.
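As an illustrative sketch of the new convention (not part of this patch;
example_rdmsr_exit_handler is a hypothetical caller, modelled on the vmx/svm
changes below), a VM-exit handler is now expected to inject the fault itself:

    static void example_rdmsr_exit_handler(struct cpu_user_regs *regs)
    {
        uint64_t msr_content = 0;

        switch ( hvm_msr_read_intercept(regs->_ecx, &msr_content) )
        {
        case X86EMUL_OKAY:
            /* Success: hand the value back and move past the RDMSR. */
            msr_split(regs, msr_content);
            update_guest_eip(); /* Safe: RDMSR */
            break;

        case X86EMUL_EXCEPTION:
            /* The intercept no longer injects; the caller must. */
            hvm_inject_hw_exception(TRAP_gp_fault, 0);
            break;
        }
    }

Emulator-invoked accesses differ: hvmemul_{read,write}_msr() latch the fault
into the emulation context via x86_emul_hw_exception() instead, so the fault
is no longer raised behind the x86 emulator's back.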
Signed-off-by: Andrew Cooper
Acked-by: Kevin Tian
Reviewed-by: Jan Beulich
Reviewed-by: Paul Durrant
Reviewed-by: Boris Ostrovsky
---

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 14f9b43b9c..edcae5e64b 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1544,7 +1544,12 @@ static int hvmemul_read_msr(
     uint64_t *val,
     struct x86_emulate_ctxt *ctxt)
 {
-    return hvm_msr_read_intercept(reg, val);
+    int rc = hvm_msr_read_intercept(reg, val);
+
+    if ( rc == X86EMUL_EXCEPTION )
+        x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+
+    return rc;
 }
 
 static int hvmemul_write_msr(
@@ -1552,7 +1557,12 @@ static int hvmemul_write_msr(
     uint64_t val,
     struct x86_emulate_ctxt *ctxt)
 {
-    return hvm_msr_write_intercept(reg, val, 1);
+    int rc = hvm_msr_write_intercept(reg, val, 1);
+
+    if ( rc == X86EMUL_EXCEPTION )
+        x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+
+    return rc;
 }
 
 static int hvmemul_wbinvd(
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6621d629a5..08855c2d8d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -518,7 +518,10 @@ void hvm_do_resume(struct vcpu *v)
 
         if ( w->do_write.msr )
         {
-            hvm_msr_write_intercept(w->msr, w->value, 0);
+            if ( hvm_msr_write_intercept(w->msr, w->value, 0) ==
+                 X86EMUL_EXCEPTION )
+                hvm_inject_hw_exception(TRAP_gp_fault, 0);
+
             w->do_write.msr = 0;
         }
 
@@ -3455,7 +3458,6 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     return ret;
 
 gp_fault:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     ret = X86EMUL_EXCEPTION;
     *msr_content = -1ull;
     goto out;
@@ -3600,7 +3602,6 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
     return ret;
 
 gp_fault:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 79924a5d5f..10149591a4 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1744,7 +1744,6 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     return X86EMUL_OKAY;
 
 gpf:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -1897,7 +1896,6 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     return result;
 
 gpf:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -1924,6 +1922,8 @@ static void svm_do_msr_access(struct cpu_user_regs *regs)
 
     if ( rc == X86EMUL_OKAY )
         __update_guest_eip(regs, inst_len);
+    else if ( rc == X86EMUL_EXCEPTION )
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
 }
 
 static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index e0c2831100..59fa0aa4af 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2673,7 +2673,6 @@ done:
     return X86EMUL_OKAY;
 
 gp_fault:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -2910,7 +2909,6 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     return X86EMUL_OKAY;
 
 gp_fault:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -3603,18 +3601,33 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         break;
 
     case EXIT_REASON_MSR_READ:
     {
-        uint64_t msr_content;
-        if ( hvm_msr_read_intercept(regs->_ecx, &msr_content) == X86EMUL_OKAY )
+        uint64_t msr_content = 0;
+
+        switch ( hvm_msr_read_intercept(regs->_ecx, &msr_content) )
         {
+        case X86EMUL_OKAY:
             msr_split(regs, msr_content);
             update_guest_eip(); /* Safe: RDMSR */
+            break;
+
+        case X86EMUL_EXCEPTION:
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+            break;
         }
         break;
     }
 
     case EXIT_REASON_MSR_WRITE:
-        if ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) == X86EMUL_OKAY )
+        switch ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) )
+        {
+        case X86EMUL_OKAY:
             update_guest_eip(); /* Safe: WRMSR */
+            break;
+
+        case X86EMUL_EXCEPTION:
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+            break;
+        }
         break;
 
     case EXIT_REASON_VMXOFF:
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 74775dd473..0aef7a75c1 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1032,6 +1032,7 @@ static void load_shadow_guest_state(struct vcpu *v)
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     u32 control;
     u64 cr_gh_mask, cr_read_shadow;
+    int rc;
 
     static const u16 vmentry_fields[] = {
         VM_ENTRY_INTR_INFO,
@@ -1053,8 +1054,12 @@ static void load_shadow_guest_state(struct vcpu *v)
     if ( control & VM_ENTRY_LOAD_GUEST_PAT )
         hvm_set_guest_pat(v, get_vvmcs(v, GUEST_PAT));
     if ( control & VM_ENTRY_LOAD_PERF_GLOBAL_CTRL )
-        hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
-                                get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), 0);
+    {
+        rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+                                     get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), 0);
+        if ( rc == X86EMUL_EXCEPTION )
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+    }
 
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
 
@@ -1222,7 +1227,7 @@ static void sync_vvmcs_ro(struct vcpu *v)
 
 static void load_vvmcs_host_state(struct vcpu *v)
 {
-    int i;
+    int i, rc;
     u64 r;
     u32 control;
 
@@ -1240,8 +1245,12 @@ static void load_vvmcs_host_state(struct vcpu *v)
     if ( control & VM_EXIT_LOAD_HOST_PAT )
         hvm_set_guest_pat(v, get_vvmcs(v, HOST_PAT));
     if ( control & VM_EXIT_LOAD_PERF_GLOBAL_CTRL )
-        hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
-                                get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), 1);
+    {
+        rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+                                     get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), 1);
+        if ( rc == X86EMUL_EXCEPTION )
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+    }
 
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
 
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 262955dc43..5e256989ab 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -121,13 +121,19 @@ int hvm_set_efer(uint64_t value);
 int hvm_set_cr0(unsigned long value, bool_t may_defer);
 int hvm_set_cr3(unsigned long value, bool_t may_defer);
 int hvm_set_cr4(unsigned long value, bool_t may_defer);
-int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
-int hvm_msr_write_intercept(
-    unsigned int msr, uint64_t msr_content, bool_t may_defer);
 int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
 int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
 void hvm_ud_intercept(struct cpu_user_regs *);
 
+/*
+ * May return X86EMUL_EXCEPTION, at which point the caller is responsible for
+ * injecting a #GP fault.  Used to support speculative reads.
+ */
+int __must_check hvm_msr_read_intercept(
+    unsigned int msr, uint64_t *msr_content);
+int __must_check hvm_msr_write_intercept(
+    unsigned int msr, uint64_t msr_content, bool_t may_defer);
+
 #endif /* __ASM_X86_HVM_SUPPORT_H__ */
 
 /*