From: kaf24@firebug.cl.cam.ac.uk Date: Sun, 11 Jun 2006 13:33:16 +0000 (+0100) Subject: [HVM][VMX] Cleanups and fixes to VMCS lifecycle. X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~15972^2~17 X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=81f8f63e5e3a02284c1087aeecf160a0e3bde1fc;p=xen.git [HVM][VMX] Cleanups and fixes to VMCS lifecycle. 1. Maintain a 'launched' software flag to select between VMLAUNCH and VMRESUME. 2. Take more care with VMPTRLD/VMCLEAR. Also various other fixes (e.g., safe testing of condition codes after executing a VMX instruction). Signed-off-by: Keir Fraser --- diff --git a/xen/arch/ia64/vmx/vmx_init.c b/xen/arch/ia64/vmx/vmx_init.c index 85220890f5..d4906bcd53 100644 --- a/xen/arch/ia64/vmx/vmx_init.c +++ b/xen/arch/ia64/vmx/vmx_init.c @@ -288,9 +288,6 @@ vmx_final_setup_guest(struct vcpu *v) /* v->arch.schedule_tail = arch_vmx_do_launch; */ vmx_create_vp(v); - /* Set this ed to be vmx */ - set_bit(ARCH_VMX_VMCS_LOADED, &v->arch.arch_vmx.flags); - /* Physical mode emulation initialization, including * emulation ID allcation and related memory request */ diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c index a2c408a433..0a0fb35da3 100644 --- a/xen/arch/x86/hvm/vmx/vmcs.c +++ b/xen/arch/x86/hvm/vmx/vmcs.c @@ -42,7 +42,7 @@ int vmcs_size; -struct vmcs_struct *alloc_vmcs(void) +struct vmcs_struct *vmx_alloc_vmcs(void) { struct vmcs_struct *vmcs; u32 vmx_msr_low, vmx_msr_high; @@ -64,47 +64,63 @@ static void free_vmcs(struct vmcs_struct *vmcs) free_xenheap_pages(vmcs, order); } -static int load_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr) +static void __vmx_clear_vmcs(void *info) { - int error; - - if ((error = __vmptrld(phys_ptr))) { - clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags); - return error; - } - set_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags); - return 0; + struct vcpu *v = info; + __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs)); + v->arch.hvm_vmx.active_cpu = -1; + v->arch.hvm_vmx.launched = 0; } -static void vmx_smp_clear_vmcs(void *info) +static void vmx_clear_vmcs(struct vcpu *v) { - struct vcpu *v = (struct vcpu *)info; + unsigned int cpu = v->arch.hvm_vmx.active_cpu; - ASSERT(hvm_guest(v)); + if ( (cpu == -1) || (cpu == smp_processor_id()) ) + __vmx_clear_vmcs(v); + else + on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1); +} - if (v->arch.hvm_vmx.launch_cpu == smp_processor_id()) - __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs)); +static void vmx_load_vmcs(struct vcpu *v) +{ + __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs)); + v->arch.hvm_vmx.active_cpu = smp_processor_id(); } -void vmx_request_clear_vmcs(struct vcpu *v) +void vmx_vmcs_enter(struct vcpu *v) { - ASSERT(hvm_guest(v)); + /* + * NB. We must *always* run an HVM VCPU on its own VMCS, except for + * vmx_vmcs_enter/exit critical regions. This leads to some XXX TODOs XXX: + * 1. Move construct_vmcs() much earlier, to domain creation or + * context initialisation. + * 2. VMPTRLD as soon as we context-switch to a HVM VCPU. + * 3. VMCS destruction needs to happen later (from domain_destroy()). 
+     */
+    if ( v == current )
+        return;

-    if (v->arch.hvm_vmx.launch_cpu == smp_processor_id())
-        __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
-    else
-        smp_call_function(vmx_smp_clear_vmcs, v, 1, 1);
+    vcpu_pause(v);
+    spin_lock(&v->arch.hvm_vmx.vmcs_lock);
+
+    vmx_clear_vmcs(v);
+    vmx_load_vmcs(v);
 }

-#if 0
-static int store_vmcs(struct arch_vmx_struct *arch_vmx, u64 phys_ptr)
+void vmx_vmcs_exit(struct vcpu *v)
 {
-    /* take the current VMCS */
-    __vmptrst(phys_ptr);
-    clear_bit(ARCH_VMX_VMCS_LOADED, &arch_vmx->flags);
-    return 0;
+    if ( v == current )
+        return;
+
+    /* Don't confuse arch_vmx_do_resume (for @v or @current!) */
+    vmx_clear_vmcs(v);
+    if ( hvm_guest(current) )
+        vmx_load_vmcs(current);
+
+    spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
+    vcpu_unpause(v);
 }
-#endif

 static inline int construct_vmcs_controls(struct arch_vmx_struct *arch_vmx)
 {
@@ -247,7 +263,6 @@ static void vmx_do_launch(struct vcpu *v)
     __vmwrite(HOST_CR3, pagetable_get_paddr(v->arch.monitor_table));

     v->arch.schedule_tail = arch_vmx_do_resume;
-    v->arch.hvm_vmx.launch_cpu = smp_processor_id();

     /* init guest tsc to start from 0 */
     set_guest_time(v, 0);
@@ -410,53 +425,49 @@ static inline int construct_vmcs_host(void)
 /*
  * Need to extend to support full virtualization.
  */
-static int construct_vmcs(struct arch_vmx_struct *arch_vmx,
+static int construct_vmcs(struct vcpu *v,
                           cpu_user_regs_t *regs)
 {
+    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
     int error;
     long rc;
-    u64 vmcs_phys_ptr;

     memset(arch_vmx, 0, sizeof(struct arch_vmx_struct));

+    spin_lock_init(&arch_vmx->vmcs_lock);
+    arch_vmx->active_cpu = -1;
+
     /*
      * Create a new VMCS
      */
-    if (!(arch_vmx->vmcs = alloc_vmcs())) {
+    if (!(arch_vmx->vmcs = vmx_alloc_vmcs())) {
         printk("Failed to create a new VMCS\n");
-        rc = -ENOMEM;
-        goto err_out;
+        return -ENOMEM;
     }

-    vmcs_phys_ptr = (u64) virt_to_maddr(arch_vmx->vmcs);
-
-    if ((error = __vmpclear(vmcs_phys_ptr))) {
-        printk("construct_vmcs: VMCLEAR failed\n");
-        rc = -EINVAL;
-        goto err_out;
-    }
-    if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) {
-        printk("construct_vmcs: load_vmcs failed: VMCS = %lx\n",
-               (unsigned long) vmcs_phys_ptr);
-        rc = -EINVAL;
-        goto err_out;
-    }
+    vmx_clear_vmcs(v);
+    vmx_load_vmcs(v);
+
     if ((error = construct_vmcs_controls(arch_vmx))) {
         printk("construct_vmcs: construct_vmcs_controls failed\n");
         rc = -EINVAL;
         goto err_out;
     }
+
     /* host selectors */
     if ((error = construct_vmcs_host())) {
         printk("construct_vmcs: construct_vmcs_host failed\n");
         rc = -EINVAL;
         goto err_out;
     }
+
     /* guest selectors */
     if ((error = construct_init_vmcs_guest(regs))) {
         printk("construct_vmcs: construct_vmcs_guest failed\n");
         rc = -EINVAL;
         goto err_out;
     }
+
     if ((error |= __vmwrite(EXCEPTION_BITMAP,
                             MONITOR_DEFAULT_EXCEPTION_BITMAP))) {
         printk("construct_vmcs: setting Exception bitmap failed\n");
@@ -472,12 +483,16 @@ static int construct_vmcs(struct arch_vmx_struct *arch_vmx,
     return 0;

 err_out:
-    destroy_vmcs(arch_vmx);
+    vmx_destroy_vmcs(v);
     return rc;
 }

-void destroy_vmcs(struct arch_vmx_struct *arch_vmx)
+void vmx_destroy_vmcs(struct vcpu *v)
 {
+    struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
+
+    vmx_clear_vmcs(v);
+
     free_vmcs(arch_vmx->vmcs);
     arch_vmx->vmcs = NULL;

@@ -506,22 +521,20 @@ void vm_resume_fail(unsigned long eflags)

 void arch_vmx_do_resume(struct vcpu *v)
 {
-    if ( v->arch.hvm_vmx.launch_cpu == smp_processor_id() )
+    if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
     {
-        load_vmcs(&v->arch.hvm_vmx, virt_to_maddr(v->arch.hvm_vmx.vmcs));
-        vmx_do_resume(v);
-        reset_stack_and_jump(vmx_asm_do_resume);
+        vmx_load_vmcs(v);
     }
     else
     {
-        vmx_request_clear_vmcs(v);
-        load_vmcs(&v->arch.hvm_vmx, virt_to_maddr(v->arch.hvm_vmx.vmcs));
+        vmx_clear_vmcs(v);
+        vmx_load_vmcs(v);
         vmx_migrate_timers(v);
         vmx_set_host_env(v);
-        vmx_do_resume(v);
-        v->arch.hvm_vmx.launch_cpu = smp_processor_id();
-        reset_stack_and_jump(vmx_asm_do_relaunch);
     }
+
+    vmx_do_resume(v);
+    reset_stack_and_jump(vmx_asm_do_vmentry);
 }

 void arch_vmx_do_launch(struct vcpu *v)
@@ -529,7 +542,7 @@ void arch_vmx_do_launch(struct vcpu *v)
     int error;
     cpu_user_regs_t *regs = &current->arch.guest_context.user_regs;

-    error = construct_vmcs(&v->arch.hvm_vmx, regs);
+    error = construct_vmcs(v, regs);
     if ( error < 0 )
     {
         if (v->vcpu_id == 0) {
@@ -540,7 +553,7 @@ void arch_vmx_do_launch(struct vcpu *v)
         domain_crash_synchronous();
     }
     vmx_do_launch(v);
-    reset_stack_and_jump(vmx_asm_do_launch);
+    reset_stack_and_jump(vmx_asm_do_vmentry);
 }


@@ -613,17 +626,9 @@ static void vmcs_dump(unsigned char ch)
         }
         printk("\tVCPU %d\n", v->vcpu_id);

-        if (v != current) {
-            vcpu_pause(v);
-            __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
-        }
-
+        vmx_vmcs_enter(v);
         vmcs_dump_vcpu();
-
-        if (v != current) {
-            __vmptrld(virt_to_maddr(current->arch.hvm_vmx.vmcs));
-            vcpu_unpause(v);
-        }
+        vmx_vmcs_exit(v);
     }
 }

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index de0fb2b444..70f291a27f 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -91,8 +91,7 @@ static void vmx_relinquish_guest_resources(struct domain *d)
     {
         if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
             continue;
-        vmx_request_clear_vmcs(v);
-        destroy_vmcs(&v->arch.hvm_vmx);
+        vmx_destroy_vmcs(v);
         free_monitor_pagetable(v);
         kill_timer(&v->arch.hvm_vmx.hlt_timer);
         if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
@@ -402,54 +401,10 @@ void vmx_migrate_timers(struct vcpu *v)
         migrate_timer(&(VLAPIC(v)->vlapic_timer), v->processor);
 }

-struct vmx_cpu_guest_regs_callback_info {
-    struct vcpu *v;
-    struct cpu_user_regs *regs;
-    unsigned long *crs;
-};
-
-static void vmx_store_cpu_guest_regs(
-    struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs);
-
-static void vmx_load_cpu_guest_regs(
-    struct vcpu *v, struct cpu_user_regs *regs);
-
-static void vmx_store_cpu_guest_regs_callback(void *data)
-{
-    struct vmx_cpu_guest_regs_callback_info *info = data;
-    vmx_store_cpu_guest_regs(info->v, info->regs, info->crs);
-}
-
-static void vmx_load_cpu_guest_regs_callback(void *data)
-{
-    struct vmx_cpu_guest_regs_callback_info *info = data;
-    vmx_load_cpu_guest_regs(info->v, info->regs);
-}
-
 static void vmx_store_cpu_guest_regs(
     struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
 {
-    if ( v != current )
-    {
-        /* Non-current VCPUs must be paused to get a register snapshot. */
-        ASSERT(atomic_read(&v->pausecnt) != 0);
-
-        if ( v->arch.hvm_vmx.launch_cpu != smp_processor_id() )
-        {
-            /* Get register details from remote CPU. */
-            struct vmx_cpu_guest_regs_callback_info info = {
-                .v = v, .regs = regs, .crs = crs };
-            cpumask_t cpumask = cpumask_of_cpu(v->arch.hvm_vmx.launch_cpu);
-            on_selected_cpus(cpumask, vmx_store_cpu_guest_regs_callback,
-                             &info, 1, 1);
-            return;
-        }
-
-        /* Register details are on this CPU. Load the correct VMCS. */
-        __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
-    }
-
-    ASSERT(v->arch.hvm_vmx.launch_cpu == smp_processor_id());
+    vmx_vmcs_enter(v);

     if ( regs != NULL )
     {
@@ -471,9 +426,7 @@ static void vmx_store_cpu_guest_regs(
         __vmread(CR4_READ_SHADOW, &crs[4]);
     }

-    /* Reload current VCPU's VMCS if it was temporarily unloaded. */
-    if ( (v != current) && hvm_guest(current) )
-        __vmptrld(virt_to_maddr(current->arch.hvm_vmx.vmcs));
+    vmx_vmcs_exit(v);
 }

 /*
@@ -517,26 +470,7 @@ static void fixup_vm86_seg_bases(struct cpu_user_regs *regs)

 void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
 {
-    if ( v != current )
-    {
-        /* Non-current VCPUs must be paused to set the register snapshot. */
-        ASSERT(atomic_read(&v->pausecnt) != 0);
-
-        if ( v->arch.hvm_vmx.launch_cpu != smp_processor_id() )
-        {
-            struct vmx_cpu_guest_regs_callback_info info = {
-                .v = v, .regs = regs };
-            cpumask_t cpumask = cpumask_of_cpu(v->arch.hvm_vmx.launch_cpu);
-            on_selected_cpus(cpumask, vmx_load_cpu_guest_regs_callback,
-                             &info, 1, 1);
-            return;
-        }
-
-        /* Register details are on this CPU. Load the correct VMCS. */
-        __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
-    }
-
-    ASSERT(v->arch.hvm_vmx.launch_cpu == smp_processor_id());
+    vmx_vmcs_enter(v);

     __vmwrite(GUEST_SS_SELECTOR, regs->ss);
     __vmwrite(GUEST_DS_SELECTOR, regs->ds);
@@ -557,9 +491,7 @@ void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
     __vmwrite(GUEST_CS_SELECTOR, regs->cs);
     __vmwrite(GUEST_RIP, regs->eip);

-    /* Reload current VCPU's VMCS if it was temporarily unloaded. */
-    if ( (v != current) && hvm_guest(current) )
-        __vmptrld(virt_to_maddr(current->arch.hvm_vmx.vmcs));
+    vmx_vmcs_exit(v);
 }

 int vmx_realmode(struct vcpu *v)
@@ -688,17 +620,20 @@ int start_vmx(void)

     set_in_cr4(X86_CR4_VMXE);   /* Enable VMXE */

-    if (!(vmcs = alloc_vmcs())) {
+    if (!(vmcs = vmx_alloc_vmcs())) {
         printk("Failed to allocate VMCS\n");
         return 0;
     }

     phys_vmcs = (u64) virt_to_maddr(vmcs);

-    if (!(__vmxon(phys_vmcs))) {
-        printk("VMXON is done\n");
+    if (__vmxon(phys_vmcs)) {
+        printk("VMXON failed\n");
+        return 0;
     }

+    printk("VMXON is done\n");
+
     vmx_save_init_msrs();

     /* Setup HVM interfaces */
diff --git a/xen/arch/x86/hvm/vmx/x86_32/exits.S b/xen/arch/x86/hvm/vmx/x86_32/exits.S
index cb20f1280d..f78aea9dcf 100644
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S
+++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S
@@ -78,69 +78,49 @@
         addl $(NR_SKIPPED_REGS*4), %esp

         ALIGN
-
 ENTRY(vmx_asm_vmexit_handler)
         /* selectors are restored/saved by VMX */
         HVM_SAVE_ALL_NOSEGREGS
         call vmx_trace_vmexit
         call vmx_vmexit_handler
-        jmp vmx_asm_do_resume
+        jmp vmx_asm_do_vmentry

-.macro vmx_asm_common launch, initialized
-1:
-/* vmx_test_all_events */
-    .if \initialized
+        ALIGN
+vmx_process_softirqs:
+        sti
+        call do_softirq
+        jmp vmx_asm_do_vmentry
+
+        ALIGN
+ENTRY(vmx_asm_do_vmentry)
         GET_CURRENT(%ebx)
-/*test_all_events:*/
-        xorl %ecx,%ecx
-        notl %ecx
         cli                             # tests must not race interrupts
-/*test_softirqs:*/
+
         movl VCPU_processor(%ebx),%eax
         shl  $IRQSTAT_shift,%eax
-        test %ecx,irq_stat(%eax,1)
-        jnz 2f
+        cmpl $0,irq_stat(%eax,1)
+        jnz  vmx_process_softirqs

-/* vmx_restore_all_guest */
         call vmx_intr_assist
         call vmx_load_cr2
         call vmx_trace_vmentry
-    .endif
+
+        cmpl $0,VCPU_vmx_launched(%ebx)
+        je   vmx_launch
+
+/*vmx_resume:*/
         HVM_RESTORE_ALL_NOSEGREGS
-        /*
-         * Check if we are going back to VMX-based VM
-         * By this time, all the setups in the VMCS must be complete.
- */ - .if \launch - /* VMLAUNCH */ - .byte 0x0f,0x01,0xc2 - pushf - call vm_launch_fail - .else /* VMRESUME */ .byte 0x0f,0x01,0xc3 pushf call vm_resume_fail - .endif - /* Should never reach here */ - hlt + ud2 - ALIGN - .if \initialized -2: -/* vmx_process_softirqs */ - sti - call do_softirq - jmp 1b - ALIGN - .endif -.endm - -ENTRY(vmx_asm_do_launch) - vmx_asm_common 1, 0 - -ENTRY(vmx_asm_do_resume) - vmx_asm_common 0, 1 - -ENTRY(vmx_asm_do_relaunch) - vmx_asm_common 1, 1 +vmx_launch: + movl $1,VCPU_vmx_launched(%ebx) + HVM_RESTORE_ALL_NOSEGREGS + /* VMLAUNCH */ + .byte 0x0f,0x01,0xc2 + pushf + call vm_launch_fail + ud2 diff --git a/xen/arch/x86/hvm/vmx/x86_64/exits.S b/xen/arch/x86/hvm/vmx/x86_64/exits.S index d6b5904fb4..767a4087af 100644 --- a/xen/arch/x86/hvm/vmx/x86_64/exits.S +++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S @@ -88,68 +88,51 @@ popq %rdi; \ addq $(NR_SKIPPED_REGS*8), %rsp; + ALIGN ENTRY(vmx_asm_vmexit_handler) /* selectors are restored/saved by VMX */ HVM_SAVE_ALL_NOSEGREGS call vmx_trace_vmexit call vmx_vmexit_handler - jmp vmx_asm_do_resume + jmp vmx_asm_do_vmentry -.macro vmx_asm_common launch, initialized -1: - .if \initialized -/* vmx_test_all_events */ + ALIGN +vmx_process_softirqs: + sti + call do_softirq + jmp vmx_asm_do_vmentry + + ALIGN +ENTRY(vmx_asm_do_vmentry) GET_CURRENT(%rbx) -/* test_all_events: */ cli # tests must not race interrupts -/*test_softirqs:*/ + movl VCPU_processor(%rbx),%eax shl $IRQSTAT_shift,%rax - leaq irq_stat(%rip), %rdx - testl $~0,(%rdx,%rax,1) - jnz 2f + leaq irq_stat(%rip),%rdx + cmpl $0,(%rdx,%rax,1) + jnz vmx_process_softirqs -/* vmx_restore_all_guest */ call vmx_intr_assist call vmx_load_cr2 call vmx_trace_vmentry - .endif - /* - * Check if we are going back to VMX-based VM - * By this time, all the setups in the VMCS must be complete. 
- */ + + cmpl $0,VCPU_vmx_launched(%rbx) + je vmx_launch + +/*vmx_resume:*/ HVM_RESTORE_ALL_NOSEGREGS - .if \launch - /* VMLAUNCH */ - .byte 0x0f,0x01,0xc2 - pushfq - call vm_launch_fail - .else /* VMRESUME */ .byte 0x0f,0x01,0xc3 pushfq call vm_resume_fail - .endif - /* Should never reach here */ - hlt + ud2 - ALIGN - - .if \initialized -2: -/* vmx_process_softirqs */ - sti - call do_softirq - jmp 1b - ALIGN - .endif -.endm - -ENTRY(vmx_asm_do_launch) - vmx_asm_common 1, 0 - -ENTRY(vmx_asm_do_resume) - vmx_asm_common 0, 1 - -ENTRY(vmx_asm_do_relaunch) - vmx_asm_common 1, 1 +vmx_launch: + movl $1,VCPU_vmx_launched(%rbx) + HVM_RESTORE_ALL_NOSEGREGS + /* VMLAUNCH */ + .byte 0x0f,0x01,0xc2 + pushfq + call vm_launch_fail + ud2 diff --git a/xen/arch/x86/x86_32/asm-offsets.c b/xen/arch/x86/x86_32/asm-offsets.c index f03556ef8d..5a10d51018 100644 --- a/xen/arch/x86/x86_32/asm-offsets.c +++ b/xen/arch/x86/x86_32/asm-offsets.c @@ -86,6 +86,9 @@ void __dummy__(void) OFFSET(VCPU_svm_vmexit_tsc, struct vcpu, arch.hvm_svm.vmexit_tsc); BLANK(); + OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched); + BLANK(); + OFFSET(VMCB_rax, struct vmcb_struct, rax); OFFSET(VMCB_tsc_offset, struct vmcb_struct, tsc_offset); BLANK(); diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c index 1719c50a05..0a4db7d430 100644 --- a/xen/arch/x86/x86_64/asm-offsets.c +++ b/xen/arch/x86/x86_64/asm-offsets.c @@ -80,6 +80,9 @@ void __dummy__(void) OFFSET(VCPU_svm_vmexit_tsc, struct vcpu, arch.hvm_svm.vmexit_tsc); BLANK(); + OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched); + BLANK(); + OFFSET(VMCB_rax, struct vmcb_struct, rax); OFFSET(VMCB_tsc_offset, struct vmcb_struct, tsc_offset); BLANK(); diff --git a/xen/include/asm-ia64/vmx_vpd.h b/xen/include/asm-ia64/vmx_vpd.h index b2030c3e5b..dc38592d11 100644 --- a/xen/include/asm-ia64/vmx_vpd.h +++ b/xen/include/asm-ia64/vmx_vpd.h @@ -104,9 +104,6 @@ struct arch_vmx_struct { #define VMX_DOMAIN(d) d->arch.arch_vmx.flags -#define ARCH_VMX_VMCS_LOADED 0 /* VMCS has been loaded and active */ -#define ARCH_VMX_VMCS_LAUNCH 1 /* Needs VMCS launch */ -#define ARCH_VMX_VMCS_RESUME 2 /* Needs VMCS resume */ #define ARCH_VMX_IO_WAIT 3 /* Waiting for I/O completion */ #define ARCH_VMX_INTR_ASSIST 4 /* Need DM's assist to issue intr */ #define ARCH_VMX_CONTIG_MEM 5 /* Need contiguous machine pages */ diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h index c664dca89a..dbc01c61c3 100644 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h @@ -65,34 +65,46 @@ struct vmx_msr_state { }; struct arch_vmx_struct { - struct vmcs_struct *vmcs; /* VMCS pointer in virtual. */ - unsigned int launch_cpu; /* VMCS is valid on this CPU. */ - u32 exec_control; /* cache of cpu execution control */ - u32 vector_injected; /* if there is vector installed in the INTR_INFO_FIELD */ - unsigned long flags; /* VMCS flags */ - unsigned long cpu_cr0; /* copy of guest CR0 */ - unsigned long cpu_shadow_cr0; /* copy of guest read shadow CR0 */ - unsigned long cpu_cr2; /* save CR2 */ - unsigned long cpu_cr3; - unsigned long cpu_state; - unsigned long cpu_based_exec_control; - struct vmx_msr_state msr_content; - void *io_bitmap_a, *io_bitmap_b; - struct timer hlt_timer; /* hlt ins emulation wakeup timer */ + /* Virtual address of VMCS. */ + struct vmcs_struct *vmcs; + + /* Protects remote usage of VMCS (VMPTRLD/VMCLEAR). */ + spinlock_t vmcs_lock; + + /* + * Activation and launch status of this VMCS. 
+     * - Activated on a CPU by VMPTRLD. Deactivated by VMCLEAR.
+     * - Launched on active CPU by VMLAUNCH when current VMCS.
+     */
+    int                  active_cpu;
+    int                  launched;
+
+    /* Cache of cpu execution control. */
+    u32                  exec_control;
+
+    /* If there is vector installed in the INTR_INFO_FIELD. */
+    u32                  vector_injected;
+
+    unsigned long        cpu_cr0; /* copy of guest CR0 */
+    unsigned long        cpu_shadow_cr0; /* copy of guest read shadow CR0 */
+    unsigned long        cpu_cr2; /* save CR2 */
+    unsigned long        cpu_cr3;
+    unsigned long        cpu_state;
+    unsigned long        cpu_based_exec_control;
+    struct vmx_msr_state msr_content;
+    void                *io_bitmap_a, *io_bitmap_b;
+    struct timer         hlt_timer;  /* hlt ins emulation wakeup timer */
 };

 #define vmx_schedule_tail(next)         \
     (next)->thread.arch_vmx.arch_vmx_schedule_tail((next))

-#define ARCH_VMX_VMCS_LOADED    0       /* VMCS has been loaded and active */
-#define ARCH_VMX_VMCS_LAUNCH    1       /* Needs VMCS launch */
-#define ARCH_VMX_VMCS_RESUME    2       /* Needs VMCS resume */
-
 void vmx_do_resume(struct vcpu *);
-struct vmcs_struct *alloc_vmcs(void);
-void destroy_vmcs(struct arch_vmx_struct *arch_vmx);

-extern void vmx_request_clear_vmcs(struct vcpu *v);
+struct vmcs_struct *vmx_alloc_vmcs(void);
+void vmx_destroy_vmcs(struct vcpu *v);
+void vmx_vmcs_enter(struct vcpu *v);
+void vmx_vmcs_exit(struct vcpu *v);

 #define VMCS_USE_HOST_ENV       1
 #define VMCS_USE_SEPARATE_ENV   0
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 9ea3baf75e..074c85bbaf 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -27,8 +27,7 @@
 #include 

 extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
-extern void vmx_asm_do_resume(void);
-extern void vmx_asm_do_launch(void);
+extern void vmx_asm_do_vmentry(void);
 extern void vmx_intr_assist(void);
 extern void vmx_migrate_timers(struct vcpu *v);
 extern void arch_vmx_do_launch(struct vcpu *);
@@ -200,22 +199,18 @@ extern unsigned int cpu_rev;
 #define MODRM_EAX_07    ".byte 0x38\n" /* [EAX], with reg/opcode: /7 */
 #define MODRM_EAX_ECX   ".byte 0xc1\n" /* [EAX], [ECX] */

-static inline int __vmptrld (u64 addr)
+static inline void __vmptrld(u64 addr)
 {
-    unsigned long eflags;
     __asm__ __volatile__ ( VMPTRLD_OPCODE
                            MODRM_EAX_06
+                           /* CF==1 or ZF==1 --> crash (ud2) */
+                           "ja 1f ; ud2 ; 1:\n"
                            :
                            : "a" (&addr)
                            : "memory");
-
-    __save_flags(eflags);
-    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
-        return -1;
-    return 0;
 }

-static inline void __vmptrst (u64 addr)
+static inline void __vmptrst(u64 addr)
 {
     __asm__ __volatile__ ( VMPTRST_OPCODE
                            MODRM_EAX_07
@@ -224,31 +219,30 @@ static inline void __vmptrst (u64 addr)
                            : "memory");
 }

-static inline int __vmpclear (u64 addr)
+static inline void __vmpclear(u64 addr)
 {
-    unsigned long eflags;
-
     __asm__ __volatile__ ( VMCLEAR_OPCODE
                            MODRM_EAX_06
+                           /* CF==1 or ZF==1 --> crash (ud2) */
+                           "ja 1f ; ud2 ; 1:\n"
                            :
                            : "a" (&addr)
                            : "memory");
-    __save_flags(eflags);
-    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
-        return -1;
-    return 0;
 }

 #define __vmread(x, ptr) ___vmread((x), (ptr), sizeof(*(ptr)))

-static always_inline int ___vmread (const unsigned long field, void *ptr, const int size)
+static always_inline int ___vmread(
+    const unsigned long field, void *ptr, const int size)
 {
-    unsigned long eflags;
     unsigned long ecx = 0;
+    int rc;

     __asm__ __volatile__ ( VMREAD_OPCODE
-                           MODRM_EAX_ECX
-                           : "=c" (ecx)
+                           MODRM_EAX_ECX
+                           /* CF==1 or ZF==1 --> rc = -1 */
+                           "setna %b0 ; neg %0"
+                           : "=r" (rc), "=c" (ecx)
                            : "a" (field)
                            : "memory");

@@ -270,10 +264,7 @@ static always_inline int ___vmread (const unsigned long field, void *ptr, const
         break;
     }

-    __save_flags(eflags);
-    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
-        return -1;
-    return 0;
+    return rc;
 }


@@ -315,17 +306,16 @@ static always_inline void __vmread_vcpu(struct vcpu *v, unsigned long field, uns

 static inline int __vmwrite (unsigned long field, unsigned long value)
 {
-    unsigned long eflags;
     struct vcpu *v = current;
+    int rc;

     __asm__ __volatile__ ( VMWRITE_OPCODE
                            MODRM_EAX_ECX
-                           :
+                           /* CF==1 or ZF==1 --> rc = -1 */
+                           "setna %b0 ; neg %0"
+                           : "=r" (rc)
                            : "a" (field) , "c" (value)
                            : "memory");
-    __save_flags(eflags);
-    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
-        return -1;

     switch(field) {
     case CR0_READ_SHADOW:
@@ -335,7 +325,7 @@ static inline int __vmwrite (unsigned long field, unsigned long value)
         break;
     }

-    return 0;
+    return rc;
 }

 static inline int __vm_set_bit(unsigned long field, unsigned long mask)
@@ -370,17 +360,17 @@ static inline void __vmxoff (void)

 static inline int __vmxon (u64 addr)
 {
-    unsigned long eflags;
+    int rc;

     __asm__ __volatile__ ( VMXON_OPCODE
                            MODRM_EAX_06
-                           :
+                           /* CF==1 or ZF==1 --> rc = -1 */
+                           "setna %b0 ; neg %0"
+                           : "=r" (rc)
                            : "a" (&addr)
                            : "memory");
-    __save_flags(eflags);
-    if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
-        return -1;
-    return 0;
+
+    return rc;
 }

 /* Make sure that xen intercepts any FP accesses from current */
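
The remote-VMCS discipline introduced by vmx_vmcs_enter()/vmx_vmcs_exit() is worth a short illustration: enter pauses the target VCPU, takes vmcs_lock, VMCLEARs the VMCS on whichever CPU holds it active, and VMPTRLDs it on the local CPU; exit VMCLEARs it again and reloads the caller's own VMCS. A minimal sketch of a caller, with a hypothetical helper name (read_guest_rip is not part of this patch):

    /* Illustrative only -- assumed helper, not in the patch. */
    static unsigned long read_guest_rip(struct vcpu *v)
    {
        unsigned long rip;
        vmx_vmcs_enter(v);          /* pause @v; VMCLEAR remotely; VMPTRLD here */
        __vmread(GUEST_RIP, &rip);  /* @v's VMCS is now current on this CPU */
        vmx_vmcs_exit(v);           /* VMCLEAR @v; reload current's VMCS */
        return rip;
    }

The VMCLEAR on exit is also what keeps the new 'launched' flag honest: __vmx_clear_vmcs() zeroes it, so the next entry on @v's own CPU takes the VMLAUNCH path in vmx_asm_do_vmentry rather than VMRESUME.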
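Likewise for the 'safe testing of condition codes': VMX instructions report failure through EFLAGS (CF=1 for VMfailInvalid, ZF=1 for VMfailValid; both clear on success). Reading the flags afterwards with __save_flags() was unsafe because the compiler may schedule flag-clobbering instructions between the VMX instruction and the read. The new wrappers capture the outcome inside the asm statement itself, either with "ja 1f ; ud2 ; 1:" (crash on a VMPTRLD/VMCLEAR that must never fail) or with "setna %b0 ; neg %0" (fold CF|ZF into a -1 return value), so callers can test the result directly, e.g. (hypothetical call site, for illustration only):

    if ( __vmwrite(EXCEPTION_BITMAP, MONITOR_DEFAULT_EXCEPTION_BITMAP) )
        domain_crash_synchronous();  /* assumed error handling */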