From: Roger Pau Monné
Date: Thu, 9 Feb 2017 10:02:11 +0000 (+0100)
Subject: x86/hvm: add vcpu parameter to guest memory copy function
X-Git-Tag: archive/raspbian/4.11.1-1+rpi1~1^2~66^2~2829
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=ea4e7040eedd81704a6f3ff5fa2e6d6e0bb374c9;p=xen.git

x86/hvm: add vcpu parameter to guest memory copy function

Current __hvm_copy assumes that the destination memory belongs to the
current vcpu, but this is not always the case: during the PVHv2 Dom0 build
the hvm copy functions are used while current is the idle vcpu. Add a new
vcpu parameter to hvm copy in order to solve that. Note that only
hvm_copy_to_guest_phys is changed to take a vcpu parameter, because it is
the only one currently required in order to build a PVHv2 Dom0.

While there, also assert that the passed vcpu belongs to an HVM guest.

Signed-off-by: Roger Pau Monné
Reviewed-by: Jan Beulich
Reviewed-by: Kevin Tian

Fix the build, retaining prior log message attributes.

Signed-off-by: Jan Beulich
---

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 0d21fe1902..fed88012c4 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1294,7 +1294,7 @@ static int hvmemul_rep_movs(
     rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);
     if ( rc == HVMCOPY_okay )
-        rc = hvm_copy_to_guest_phys(dgpa, buf, bytes);
+        rc = hvm_copy_to_guest_phys(dgpa, buf, bytes, current);
 
     xfree(buf);
 
@@ -1405,7 +1405,7 @@ static int hvmemul_rep_stos(
         if ( df )
             gpa -= bytes - bytes_per_rep;
 
-        rc = hvm_copy_to_guest_phys(gpa, buf, bytes);
+        rc = hvm_copy_to_guest_phys(gpa, buf, bytes, current);
 
         if ( buf != p_data )
             xfree(buf);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 9ffc21bb44..5f7275887f 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3082,16 +3082,17 @@ void hvm_task_switch(
 #define HVMCOPY_phys   (0u<<2)
 #define HVMCOPY_linear (1u<<2)
 static enum hvm_copy_result __hvm_copy(
-    void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec,
-    pagefault_info_t *pfinfo)
+    void *buf, paddr_t addr, int size, struct vcpu *v, unsigned int flags,
+    uint32_t pfec, pagefault_info_t *pfinfo)
 {
-    struct vcpu *curr = current;
     unsigned long gfn;
     struct page_info *page;
     p2m_type_t p2mt;
     char *p;
     int count, todo = size;
 
+    ASSERT(has_hvm_container_vcpu(v));
+
     /*
      * XXX Disable for 4.1.0: PV-on-HVM drivers will do grant-table ops
      * such as query_size. Grant-table code currently does copy_to/from_guest
@@ -3116,7 +3117,7 @@ static enum hvm_copy_result __hvm_copy(
 
         if ( flags & HVMCOPY_linear )
         {
-            gfn = paging_gva_to_gfn(curr, addr, &pfec);
+            gfn = paging_gva_to_gfn(v, addr, &pfec);
             if ( gfn == gfn_x(INVALID_GFN) )
             {
                 if ( pfec & PFEC_page_paged )
@@ -3143,12 +3144,12 @@ static enum hvm_copy_result __hvm_copy(
          * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
          * - newer Windows (like Server 2012) for HPET accesses.
          */
-        if ( !nestedhvm_vcpu_in_guestmode(curr)
-             && is_hvm_vcpu(curr)
+        if ( v == current && is_hvm_vcpu(v)
+             && !nestedhvm_vcpu_in_guestmode(v)
              && hvm_mmio_internal(gpa) )
             return HVMCOPY_bad_gfn_to_mfn;
 
-        page = get_page_from_gfn(curr->domain, gfn, &p2mt, P2M_UNSHARE);
+        page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_UNSHARE);
         if ( !page )
             return HVMCOPY_bad_gfn_to_mfn;
 
@@ -3156,7 +3157,7 @@ static enum hvm_copy_result __hvm_copy(
         if ( p2m_is_paging(p2mt) )
         {
             put_page(page);
-            p2m_mem_paging_populate(curr->domain, gfn);
+            p2m_mem_paging_populate(v->domain, gfn);
             return HVMCOPY_gfn_paged_out;
         }
         if ( p2m_is_shared(p2mt) )
@@ -3178,9 +3179,9 @@ static enum hvm_copy_result __hvm_copy(
             {
                 static unsigned long lastpage;
                 if ( xchg(&lastpage, gfn) != gfn )
-                    gdprintk(XENLOG_DEBUG, "guest attempted write to read-only"
-                             " memory page. gfn=%#lx, mfn=%#lx\n",
-                             gfn, page_to_mfn(page));
+                    dprintk(XENLOG_G_DEBUG,
+                            "%pv attempted write to read-only gfn %#lx (mfn=%#lx)\n",
+                            v, gfn, page_to_mfn(page));
             }
             else
             {
@@ -3188,7 +3189,7 @@ static enum hvm_copy_result __hvm_copy(
                     memcpy(p, buf, count);
                 else
                     memset(p, 0, count);
-                paging_mark_dirty(curr->domain, _mfn(page_to_mfn(page)));
+                paging_mark_dirty(v->domain, _mfn(page_to_mfn(page)));
             }
         }
         else
@@ -3209,16 +3210,16 @@ static enum hvm_copy_result __hvm_copy(
 }
 
 enum hvm_copy_result hvm_copy_to_guest_phys(
-    paddr_t paddr, void *buf, int size)
+    paddr_t paddr, void *buf, int size, struct vcpu *v)
 {
-    return __hvm_copy(buf, paddr, size,
+    return __hvm_copy(buf, paddr, size, v,
                       HVMCOPY_to_guest | HVMCOPY_phys, 0, NULL);
 }
 
 enum hvm_copy_result hvm_copy_from_guest_phys(
     void *buf, paddr_t paddr, int size)
 {
-    return __hvm_copy(buf, paddr, size,
+    return __hvm_copy(buf, paddr, size, current,
                       HVMCOPY_from_guest | HVMCOPY_phys, 0, NULL);
 }
 
@@ -3226,7 +3227,7 @@ enum hvm_copy_result hvm_copy_to_guest_linear(
     unsigned long addr, void *buf, int size, uint32_t pfec,
     pagefault_info_t *pfinfo)
 {
-    return __hvm_copy(buf, addr, size,
+    return __hvm_copy(buf, addr, size, current,
                       HVMCOPY_to_guest | HVMCOPY_linear,
                       PFEC_page_present | PFEC_write_access | pfec, pfinfo);
 }
@@ -3235,7 +3236,7 @@ enum hvm_copy_result hvm_copy_from_guest_linear(
     void *buf, unsigned long addr, int size, uint32_t pfec,
     pagefault_info_t *pfinfo)
 {
-    return __hvm_copy(buf, addr, size,
+    return __hvm_copy(buf, addr, size, current,
                       HVMCOPY_from_guest | HVMCOPY_linear,
                       PFEC_page_present | pfec, pfinfo);
 }
@@ -3244,7 +3245,7 @@ enum hvm_copy_result hvm_fetch_from_guest_linear(
     void *buf, unsigned long addr, int size, uint32_t pfec,
     pagefault_info_t *pfinfo)
 {
-    return __hvm_copy(buf, addr, size,
+    return __hvm_copy(buf, addr, size, current,
                       HVMCOPY_from_guest | HVMCOPY_linear,
                       PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
 }
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index 721fb38030..5157e9ef20 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -135,7 +135,7 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
             if ( p->data_is_ptr )
             {
                 switch ( hvm_copy_to_guest_phys(p->data + step * i,
-                                                &data, p->size) )
+                                                &data, p->size, current) )
                 {
                 case HVMCOPY_okay:
                     break;
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 7ef4e45fe3..40efad21d3 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -77,7 +77,7 @@ static void realmode_deliver_exception(
         pstk = regs->sp -= 6;
 
     pstk += hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->base;
-    (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame));
+    (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame), current);
 
     csr->sel = cs_eip >> 16;
     csr->base = (uint32_t)csr->sel << 4;
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index ba5899c54a..16550c5642 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -68,7 +68,7 @@ enum hvm_copy_result {
  * address range does not map entirely onto ordinary machine memory.
  */
 enum hvm_copy_result hvm_copy_to_guest_phys(
-    paddr_t paddr, void *buf, int size);
+    paddr_t paddr, void *buf, int size, struct vcpu *v);
 enum hvm_copy_result hvm_copy_from_guest_phys(
     void *buf, paddr_t paddr, int size);
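
As an illustration of the new interface, below is a minimal sketch (not part
of the patch) of how a caller running outside the target vcpu's context, such
as the PVHv2 Dom0 builder, could use hvm_copy_to_guest_phys() with an explicit
vcpu instead of relying on "current" (the idle vcpu at that point). The helper
name copy_to_new_domain() and the way the target domain is obtained are
hypothetical; only the hvm_copy_to_guest_phys() prototype, the HVMCOPY_okay
return value, and the HVM-guest requirement come from the change above.

#include <xen/errno.h>
#include <xen/sched.h>
#include <asm/hvm/support.h>

/*
 * Hypothetical example: copy a buffer into the guest physical address space
 * of a domain that is still being constructed, passing one of its vcpus
 * explicitly rather than "current".
 */
static int copy_to_new_domain(struct domain *d, paddr_t gpa,
                              void *src, int size)
{
    /* d->vcpu[0] must belong to an HVM(-container) guest, per the ASSERT. */
    if ( hvm_copy_to_guest_phys(gpa, src, size, d->vcpu[0]) != HVMCOPY_okay )
        return -EFAULT;

    return 0;
}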