From d2211e14d3435cf242da59d33204ab8dcad734b1 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Tue, 3 Jan 2017 09:42:10 +0100
Subject: [PATCH] x86/MSR: introduce MSR access split/fold helpers

This is in preparation for eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags, etc.). Use the
guaranteed 32-bit, underscore-prefixed names for now where appropriate.

Signed-off-by: Jan Beulich
Acked-by: Andrew Cooper
Reviewed-by: Kevin Tian
Reviewed-by: Suravee Suthikulpanit
---
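Editor's note (illustration only, not part of the original patch): the
sketch below shows the intended behaviour of the msr_fold()/msr_split()
helpers added to asm-x86/msr.h by this patch. "struct fake_regs" and the
main() harness are made up for this note; Xen's real struct cpu_user_regs
exposes _eax/_edx as 32-bit aliases of rax/rdx, which the casts here only
approximate.

    #include <assert.h>
    #include <stdint.h>

    /* Simplified stand-in for Xen's struct cpu_user_regs. */
    struct fake_regs {
        uint64_t rax, rdx;
    };

    /* Combine EDX:EAX into the 64-bit value WRMSR/XSETBV consume. */
    static inline uint64_t msr_fold(const struct fake_regs *regs)
    {
        return (regs->rdx << 32) | (uint32_t)regs->rax;
    }

    /* Scatter a 64-bit value into EDX:EAX as RDMSR/RDTSC produce. */
    static inline void msr_split(struct fake_regs *regs, uint64_t val)
    {
        regs->rdx = val >> 32;
        regs->rax = (uint32_t)val;
    }

    int main(void)
    {
        struct fake_regs regs;

        /* Splitting and re-folding a value must round-trip. */
        msr_split(&regs, 0x0123456789abcdefULL);
        assert(regs.rdx == 0x01234567 && regs.rax == 0x89abcdef);
        assert(msr_fold(&regs) == 0x0123456789abcdefULL);

        return 0;
    }
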
 xen/arch/x86/hvm/hvm.c      |  7 ++-----
 xen/arch/x86/hvm/svm/svm.c  | 11 +++--------
 xen/arch/x86/hvm/vmx/vmx.c  | 15 +++++----------
 xen/arch/x86/hvm/vmx/vvmx.c |  6 +-----
 xen/arch/x86/time.c         |  7 ++-----
 xen/arch/x86/traps.c        |  7 +------
 xen/include/asm-x86/msr.h   | 11 +++++++++++
 7 files changed, 25 insertions(+), 39 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 708f4746e5..70afcc6ceb 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3695,12 +3695,9 @@ static uint64_t _hvm_rdtsc_intercept(void)
 
 void hvm_rdtsc_intercept(struct cpu_user_regs *regs)
 {
-    uint64_t tsc = _hvm_rdtsc_intercept();
+    msr_split(regs, _hvm_rdtsc_intercept());
 
-    regs->eax = (uint32_t)tsc;
-    regs->edx = (uint32_t)(tsc >> 32);
-
-    HVMTRACE_2D(RDTSC, regs->eax, regs->edx);
+    HVMTRACE_2D(RDTSC, regs->_eax, regs->_edx);
 }
 
 int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 811ea4e5b4..97f3d6556f 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1936,14 +1936,10 @@ static void svm_do_msr_access(struct cpu_user_regs *regs)
         rc = hvm_msr_read_intercept(regs->_ecx, &msr_content);
 
         if ( rc == X86EMUL_OKAY )
-        {
-            regs->rax = (uint32_t)msr_content;
-            regs->rdx = (uint32_t)(msr_content >> 32);
-        }
+            msr_split(regs, msr_content);
     }
     else
-        rc = hvm_msr_write_intercept(regs->_ecx,
-                                     (regs->rdx << 32) | regs->_eax, 1);
+        rc = hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1);
 
     if ( rc == X86EMUL_OKAY )
         __update_guest_eip(regs, inst_len);
@@ -2618,8 +2614,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
         if ( vmcb_get_cpl(vmcb) )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
         else if ( (inst_len = __get_instruction_length(v, INSTR_XSETBV)) &&
-                  hvm_handle_xsetbv(regs->ecx,
-                                    (regs->rdx << 32) | regs->_eax) == 0 )
+                  hvm_handle_xsetbv(regs->_ecx, msr_fold(regs)) == 0 )
             __update_guest_eip(regs, inst_len);
         break;
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index d50d49eee5..68db0cb645 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -3626,22 +3626,18 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     case EXIT_REASON_MSR_READ:
     {
         uint64_t msr_content;
-        if ( hvm_msr_read_intercept(regs->ecx, &msr_content) == X86EMUL_OKAY )
+        if ( hvm_msr_read_intercept(regs->_ecx, &msr_content) == X86EMUL_OKAY )
         {
-            regs->eax = (uint32_t)msr_content;
-            regs->edx = (uint32_t)(msr_content >> 32);
+            msr_split(regs, msr_content);
             update_guest_eip(); /* Safe: RDMSR */
         }
         break;
     }
+
     case EXIT_REASON_MSR_WRITE:
-    {
-        uint64_t msr_content;
-        msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
-        if ( hvm_msr_write_intercept(regs->ecx, msr_content, 1) == X86EMUL_OKAY )
+        if ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) == X86EMUL_OKAY )
             update_guest_eip(); /* Safe: WRMSR */
         break;
-    }
 
     case EXIT_REASON_VMXOFF:
         if ( nvmx_handle_vmxoff(regs) == X86EMUL_OKAY )
@@ -3802,8 +3798,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         break;
 
     case EXIT_REASON_XSETBV:
-        if ( hvm_handle_xsetbv(regs->ecx,
-                               (regs->rdx << 32) | regs->_eax) == 0 )
+        if ( hvm_handle_xsetbv(regs->_ecx, msr_fold(regs)) == 0 )
             update_guest_eip(); /* Safe: XSETBV */
         break;
 
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 6c7e92b791..d53c5762fc 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -2322,15 +2322,11 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
             nvcpu->nv_vmexit_pending = 1;
         else
         {
-            uint64_t tsc;
-
             /*
              * special handler is needed if L1 doesn't intercept rdtsc,
              * avoiding changing guest_tsc and messing up timekeeping in L1
              */
-            tsc = hvm_get_guest_tsc(v) + get_vvmcs(v, TSC_OFFSET);
-            regs->eax = (uint32_t)tsc;
-            regs->edx = (uint32_t)(tsc >> 32);
+            msr_split(regs, hvm_get_guest_tsc(v) + get_vvmcs(v, TSC_OFFSET));
 
             update_guest_eip();
             return 1;
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index cb6939eb29..b89fa13068 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1918,13 +1918,10 @@ void pv_soft_rdtsc(struct vcpu *v, struct cpu_user_regs *regs, int rdtscp)
 
     spin_unlock(&d->arch.vtsc_lock);
 
-    now = gtime_to_gtsc(d, now);
-
-    regs->eax = (uint32_t)now;
-    regs->edx = (uint32_t)(now >> 32);
+    msr_split(regs, gtime_to_gtsc(d, now));
 
     if ( rdtscp )
-         regs->ecx =
+        regs->rcx =
             (d->arch.tsc_mode == TSC_MODE_PVRDTSCP) ? d->arch.incarnation : 0;
 }
 
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 8005208764..2d211d1621 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -3404,12 +3404,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
         else if ( currd->arch.vtsc )
             pv_soft_rdtsc(curr, regs, 0);
         else
-        {
-            uint64_t val = rdtsc();
-
-            regs->eax = (uint32_t)val;
-            regs->edx = (uint32_t)(val >> 32);
-        }
+            msr_split(regs, rdtsc());
     }
 
     if ( ctxt.ctxt.retire.singlestep )
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index f3b85d0352..f5900ab3af 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -71,6 +71,17 @@ static inline int wrmsr_safe(unsigned int msr, uint64_t val)
     return _rc;
 }
 
+static inline uint64_t msr_fold(const struct cpu_user_regs *regs)
+{
+    return (regs->rdx << 32) | regs->_eax;
+}
+
+static inline void msr_split(struct cpu_user_regs *regs, uint64_t val)
+{
+    regs->rdx = val >> 32;
+    regs->rax = (uint32_t)val;
+}
+
 static inline uint64_t rdtsc(void)
 {
     uint32_t low, high;
--
2.30.2