#define VMRESUME .byte 0x0f,0x01,0xc3
#define VMLAUNCH .byte 0x0f,0x01,0xc2
-#define VMREAD(off) .byte 0x0f,0x78,0x47,((off)-UREGS_rip)
-#define VMWRITE(off) .byte 0x0f,0x79,0x47,((off)-UREGS_rip)
-/* VMCS field encodings */
-#define GUEST_RSP 0x681c
-#define GUEST_RIP 0x681e
-#define GUEST_RFLAGS 0x6820
-
- ALIGN
-.globl vmx_asm_vmexit_handler
-vmx_asm_vmexit_handler:
+ENTRY(vmx_asm_vmexit_handler)
push %rdi
push %rsi
push %rdx
push %rcx
push %rax
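+ /* Read the guest's CR2 right away; %rax was just saved and is free to clobber. */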
+ mov %cr2,%rax
push %r8
push %r9
push %r10
push %r11
push %rbx
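+ /* %rbx was saved just above, so it can now hold the current vcpu pointer. */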
+ GET_CURRENT(%rbx)
push %rbp
push %r12
push %r13
push %r14
push %r15
- GET_CURRENT(%rbx)
-
movb $1,VCPU_vmx_launched(%rbx)
-
- lea UREGS_rip(%rsp),%rdi
- mov $GUEST_RIP,%eax
- /*VMREAD(UREGS_rip)*/
- .byte 0x0f,0x78,0x07 /* vmread %rax,(%rdi) */
- mov $GUEST_RSP,%eax
- VMREAD(UREGS_rsp)
- mov $GUEST_RFLAGS,%eax
- VMREAD(UREGS_eflags)
-
- mov %cr2,%rax
mov %rax,VCPU_hvm_guest_cr2(%rbx)
-#ifndef NDEBUG
- mov $0xbeef,%ax
- mov %ax,UREGS_error_code(%rsp)
- mov %ax,UREGS_entry_vector(%rsp)
- mov %ax,UREGS_saved_upcall_mask(%rsp)
- mov %ax,UREGS_cs(%rsp)
- mov %ax,UREGS_ds(%rsp)
- mov %ax,UREGS_es(%rsp)
- mov %ax,UREGS_fs(%rsp)
- mov %ax,UREGS_gs(%rsp)
- mov %ax,UREGS_ss(%rsp)
-#endif
-
mov %rsp,%rdi
call vmx_vmexit_handler
-.globl vmx_asm_do_vmentry
-vmx_asm_do_vmentry:
+.Lvmx_do_vmentry:
call vmx_intr_assist
call nvmx_switch_guest
ASSERT_NOT_IN_ATOMIC
- GET_CURRENT(%rbx)
- cli
-
mov VCPU_processor(%rbx),%eax
- shl $IRQSTAT_shift,%eax
lea irq_stat+IRQSTAT_softirq_pending(%rip),%rdx
- cmpl $0,(%rdx,%rax,1)
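+ /* Zero %ecx once; the byte/word compares below reuse %cl/%cx as the zero operand. */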
+ xor %ecx,%ecx
+ shl $IRQSTAT_shift,%eax
+ cli
+ cmp %ecx,(%rdx,%rax,1)
jnz .Lvmx_process_softirqs
- testb $0xff,VCPU_vmx_emulate(%rbx)
- jnz .Lvmx_goto_emulator
- testb $0xff,VCPU_vmx_realmode(%rbx)
-UNLIKELY_START(nz, realmode)
- cmpw $0,VCPU_vm86_seg_mask(%rbx)
+ cmp %cl,VCPU_vmx_emulate(%rbx)
+ jne .Lvmx_goto_emulator
+ cmp %cl,VCPU_vmx_realmode(%rbx)
+UNLIKELY_START(ne, realmode)
+ cmp %cx,VCPU_vm86_seg_mask(%rbx)
jnz .Lvmx_goto_emulator
mov %rsp,%rdi
call vmx_enter_realmode
UNLIKELY_END(realmode)
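+ /* Pass the register frame to vmx_vmenter_helper(), which now writes the guest RIP/RSP/RFLAGS into the VMCS. */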
+ mov %rsp,%rdi
call vmx_vmenter_helper
mov VCPU_hvm_guest_cr2(%rbx),%rax
- mov %rax,%cr2
- lea UREGS_rip(%rsp),%rdi
- mov $GUEST_RIP,%eax
- /*VMWRITE(UREGS_rip)*/
- .byte 0x0f,0x79,0x07 /* vmwrite (%rdi),%rax */
- mov $GUEST_RSP,%eax
- VMWRITE(UREGS_rsp)
- mov $GUEST_RFLAGS,%eax
- VMWRITE(UREGS_eflags)
-
- cmpb $0,VCPU_vmx_launched(%rbx)
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbp
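+ /* CR2 must be restored before %rax is popped, and vmx_launched read before %rbx is popped. */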
+ mov %rax,%cr2
+ cmpb $0,VCPU_vmx_launched(%rbx)
pop %rbx
pop %r11
pop %r10
call vm_launch_fail
ud2
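+/* External entry point; the VM exit path above loops back to .Lvmx_do_vmentry directly. */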
+ENTRY(vmx_asm_do_vmentry)
+ GET_CURRENT(%rbx)
+ jmp .Lvmx_do_vmentry
+
.Lvmx_goto_emulator:
sti
mov %rsp,%rdi
call vmx_realmode
- jmp vmx_asm_do_vmentry
+ jmp .Lvmx_do_vmentry
.Lvmx_process_softirqs:
sti
call do_softirq
- jmp vmx_asm_do_vmentry
+ jmp .Lvmx_do_vmentry

--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
unsigned long exit_qualification, inst_len = 0;
struct vcpu *v = current;
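+ /* Fetch the guest state that entry.S no longer transfers with VMREAD. */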
+ regs->rip = __vmread(GUEST_RIP);
+ regs->rsp = __vmread(GUEST_RSP);
+ regs->rflags = __vmread(GUEST_RFLAGS);
+
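+ /* In debug builds, poison the frame fields not written above (the 0xbeef block formerly in entry.S). */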
+ hvm_invalidate_regs_fields(regs);
+
if ( paging_mode_hap(v->domain) && hvm_paging_enabled(v) )
v->arch.hvm_vcpu.guest_cr[3] = v->arch.hvm_vcpu.hw_cr[3] =
__vmread(GUEST_CR3);
nvmx_idtv_handling();
}
-void vmx_vmenter_helper(void)
+void vmx_vmenter_helper(const struct cpu_user_regs *regs)
{
struct vcpu *curr = current;
u32 new_asid, old_asid;
out:
HVMTRACE_ND(VMENTRY, 0, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
+
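+ /* Write the guest state back to the VMCS; entry.S no longer does this with VMWRITE. */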
+ __vmwrite(GUEST_RIP, regs->rip);
+ __vmwrite(GUEST_RSP, regs->rsp);
+ __vmwrite(GUEST_RFLAGS, regs->rflags);
}
/*