From: Brian Woods
Date: Tue, 31 Oct 2017 22:03:08 +0000 (-0500)
Subject: x86/svm: add virtual VMLOAD/VMSAVE support
X-Git-Tag: archive/raspbian/4.11.1-1+rpi1~1^2~66^2~980
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=26f9a18485b5daf5215c8032a3049821c374b148;p=xen.git

x86/svm: add virtual VMLOAD/VMSAVE support

On AMD family 17h server processors, there is a feature called virtual
VMLOAD/VMSAVE.  This allows a nested hypervisor to perform a VMLOAD or
VMSAVE without needing to be intercepted by the host hypervisor.
Virtual VMLOAD/VMSAVE requires the host hypervisor to be in long mode
and nested page tables to be enabled.  For more information about it,
please see:

    AMD64 Architecture Programmer's Manual Volume 2: System Programming
    http://support.amd.com/TechDocs/24593.pdf
    Section: VMSAVE and VMLOAD Virtualization (Section 15.33.1)

This patch series adds support to check for and enable the virtual
VMLOAD/VMSAVE feature if available.

Signed-off-by: Brian Woods
Reviewed-by: Andrew Cooper
Reviewed-by: Boris Ostrovsky
---

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index c8ffb17515..60b1288a31 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1669,6 +1669,7 @@ const struct hvm_function_table * __init start_svm(void)
     P(cpu_has_svm_nrips, "Next-RIP Saved on #VMEXIT");
     P(cpu_has_svm_cleanbits, "VMCB Clean Bits");
     P(cpu_has_svm_decode, "DecodeAssists");
+    P(cpu_has_svm_vloadsave, "Virtual VMLOAD/VMSAVE");
     P(cpu_has_pause_filter, "Pause-Intercept Filter");
     P(cpu_has_tsc_ratio, "TSC Rate MSR");
 #undef P
diff --git a/xen/arch/x86/hvm/svm/svmdebug.c b/xen/arch/x86/hvm/svm/svmdebug.c
index 89ef2db932..091c58fa1b 100644
--- a/xen/arch/x86/hvm/svm/svmdebug.c
+++ b/xen/arch/x86/hvm/svm/svmdebug.c
@@ -55,6 +55,8 @@ void svm_vmcb_dump(const char *from, const struct vmcb_struct *vmcb)
            vmcb->exitinfo1, vmcb->exitinfo2);
     printk("np_enable = %#"PRIx64" guest_asid = %#x\n",
            vmcb_get_np_enable(vmcb), vmcb_get_guest_asid(vmcb));
+    printk("virtual vmload/vmsave = %d, virt_ext = %#"PRIx64"\n",
+           vmcb->virt_ext.fields.vloadsave_enable, vmcb->virt_ext.bytes);
     printk("cpl = %d efer = %#"PRIx64" star = %#"PRIx64" lstar = %#"PRIx64"\n",
            vmcb_get_cpl(vmcb), vmcb_get_efer(vmcb), vmcb->star, vmcb->lstar);
     printk("CR0 = 0x%016"PRIx64" CR2 = 0x%016"PRIx64"\n",
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 997e7597e0..2e48fdde32 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -200,6 +200,14 @@ static int construct_vmcb(struct vcpu *v)
 
         /* PAT is under complete control of SVM when using nested paging. */
         svm_disable_intercept_for_msr(v, MSR_IA32_CR_PAT);
+
+        /* Use virtual VMLOAD/VMSAVE if available. */
+        if ( cpu_has_svm_vloadsave )
+        {
+            vmcb->virt_ext.fields.vloadsave_enable = 1;
+            vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_VMLOAD;
+            vmcb->_general2_intercepts &= ~GENERAL2_INTERCEPT_VMSAVE;
+        }
     }
     else
     {
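
Note: cpu_has_svm_vloadsave is defined by a companion change to the SVM
headers that is not shown in this diff; it reflects the feature bit in
CPUID Fn8000_000A_EDX[15].  A self-contained user-space sketch of that
detection, assuming GCC/Clang's <cpuid.h> (illustrative only, not the
Xen implementation):

    #include <cpuid.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* CPUID Fn8000_000A_EDX bit 15: virtualized VMSAVE/VMLOAD. */
    #define SVM_FEATURE_VLOADSAVE 15

    static bool has_virt_vmload_vmsave(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* SVM itself is reported in CPUID Fn8000_0001_ECX[2]; without
         * it, leaf Fn8000_000A is meaningless. */
        if ( !__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) ||
             !(ecx & (1u << 2)) )
            return false;

        if ( !__get_cpuid(0x8000000a, &eax, &ebx, &ecx, &edx) )
            return false;

        return edx & (1u << SVM_FEATURE_VLOADSAVE);
    }

    int main(void)
    {
        printf("virtual VMLOAD/VMSAVE: %s\n",
               has_virt_vmload_vmsave() ? "present" : "absent");
        return 0;
    }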
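
The vmcb->virt_ext accesses in the svmdebug.c and vmcb.c hunks likewise
rely on that companion VMCB-layout change.  Per the AMD APM, the 64-bit
VMCB control word at offset 0x90 carries the LBR-virtualization enable
in bit 0 and the virtualized VMLOAD/VMSAVE enable in bit 1.  A minimal
sketch of such a union, assuming that layout (the field names follow
this diff; the exact Xen definition may differ):

    #include <stdint.h>

    /* VMCB control area, offset 0x90: virtualization extensions. */
    union vmcb_virt_ext {
        struct {
            uint64_t lbr_enable:1;        /* bit 0: LBR virtualization */
            uint64_t vloadsave_enable:1;  /* bit 1: virt. VMLOAD/VMSAVE */
            uint64_t resv:62;             /* bits 2-63: reserved */
        } fields;
        uint64_t bytes;   /* raw view, as printed by svm_vmcb_dump() */
    };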