From: kfraser@localhost.localdomain
Date: Tue, 7 Nov 2006 17:48:18 +0000 (+0000)
Subject: [SVM] Fix an interrupt race window in the do_launch/vmrun/vmexit loop for AMD-V.
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=7d6376a63ad1721915d1f51ab6ecaf7449f3e134;p=xen.git

[SVM] Fix an interrupt race window in the do_launch/vmrun/vmexit loop
for AMD-V.

There is also some comment cleanup in this patch.

This problem was found by Virtual Iron (Dave Winchell), and the patch
was also provided by VI.

Signed-off-by: Tom Woller
Signed-off-by: Dave Winchell
---

diff --git a/xen/arch/x86/hvm/svm/x86_32/exits.S b/xen/arch/x86/hvm/svm/x86_32/exits.S
index 36fa80b680..2cd913e16b 100644
--- a/xen/arch/x86/hvm/svm/x86_32/exits.S
+++ b/xen/arch/x86/hvm/svm/x86_32/exits.S
@@ -34,7 +34,7 @@
  * At VMExit time the processor saves the guest selectors, esp, eip,
  * and eflags. Therefore we don't save them, but simply decrement
  * the kernel stack pointer to make it consistent with the stack frame
- * at usual interruption time. The eflags of the host is not saved by VMX,
+ * at usual interruption time. The eflags of the host is not saved by AMD-V,
  * and we set it to the fixed value.
  *
  * We also need the room, especially because orig_eax field is used
@@ -89,8 +89,8 @@
 #define CLGI .byte 0x0F,0x01,0xDD

 ENTRY(svm_asm_do_launch)
-        sti
         CLGI
+        sti
         GET_CURRENT(%ebx)
         movl VCPU_svm_vmcb(%ebx), %ecx
         movl 24(%esp), %eax
@@ -152,9 +152,8 @@ svm_restore_all_guest:
         call svm_intr_assist
         call svm_asid
         call svm_load_cr2
-        sti
         /*
-         * Check if we are going back to SVM-based VM
+         * Check if we are going back to AMD-V based VM
          * By this time, all the setups in the VMCB must be complete.
          */
         jmp svm_asm_do_launch
diff --git a/xen/arch/x86/hvm/svm/x86_64/exits.S b/xen/arch/x86/hvm/svm/x86_64/exits.S
index 823c02378d..0c9aa641a3 100644
--- a/xen/arch/x86/hvm/svm/x86_64/exits.S
+++ b/xen/arch/x86/hvm/svm/x86_64/exits.S
@@ -1,5 +1,5 @@
 /*
- * exits.S: SVM architecture-specific exit handling.
+ * exits.S: AMD-V architecture-specific exit handling.
  * Copyright (c) 2004, Intel Corporation.
  * Copyright (c) 2005, AMD Corporation.
  *
@@ -34,7 +34,7 @@
  * At VMExit time the processor saves the guest selectors, rsp, rip,
  * and rflags. Therefore we don't save them, but simply decrement
  * the kernel stack pointer to make it consistent with the stack frame
- * at usual interruption time. The rflags of the host is not saved by VMX,
+ * at usual interruption time. The rflags of the host is not saved by AMD-V,
  * and we set it to the fixed value.
  *
  * We also need the room, especially because orig_eax field is used
@@ -99,8 +99,8 @@
 #define CLGI .byte 0x0F,0x01,0xDD

 ENTRY(svm_asm_do_launch)
-        sti
         CLGI
+        sti
         GET_CURRENT(%rbx)
         movq VCPU_svm_vmcb(%rbx), %rcx
         movq UREGS_rax(%rsp), %rax
@@ -165,10 +165,9 @@ svm_restore_all_guest:
         call svm_intr_assist
         call svm_asid
         call svm_load_cr2
-        sti
         /*
-         * Check if we are going back to VMX-based VM
-         * By this time, all the setups in the VMCS must be complete.
+         * Check if we are going back to AMD-V based VM
+         * By this time, all the setups in the VMCB must be complete.
          */
         jmp svm_asm_do_launch
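
For context, a minimal sketch of the entry sequence as it looks after this patch;
it is illustrative only and not part of the diff. The CLGI encoding is the one
defined in the patch; the VMRUN/STGI defines and the svm_entry_sketch label are
hypothetical names added here, and the guest register loads are elided. The intent
of swapping sti and CLGI is that GIF is cleared before host interrupts are
re-enabled, so no interrupt can be taken between svm_intr_assist's pending-interrupt
check and VMRUN; an interrupt arriving in that window is held pending by GIF=0 and
forces a #VMEXIT right after VMRUN instead of being deferred to a later exit.

#define VMRUN  .byte 0x0F,0x01,0xD8   /* vmrun opcode (hypothetical define here)  */
#define STGI   .byte 0x0F,0x01,0xDC   /* stgi opcode  (hypothetical define here)  */
#define CLGI   .byte 0x0F,0x01,0xDD   /* clgi opcode, as defined in the patch     */

svm_entry_sketch:
        CLGI                          /* GIF=0: interrupts and NMIs are held pending        */
        sti                           /* IF=1, but nothing is delivered while GIF=0         */
        /* ... guest state is loaded from the VMCB here ... */
        VMRUN                         /* enter the guest; a held interrupt causes #VMEXIT   */
        /* ... guest state is saved back to the VMCB here ... */
        STGI                          /* GIF=1: any pending host interrupt is now taken     */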