/* %eax == 1 signals full real-mode support to the guest loader. */
if ( v->vcpu_id == 0 )
- v->arch.user_regs.eax = 1;
+ v->arch.user_regs.rax = 1;
return 0;
}
if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
return 0;
- if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
+ if ( unlikely(guest_cpu_user_regs()->_eflags & X86_EFLAGS_VM) )
return 1;
__vmread(GUEST_CS_AR_BYTES, &cs_ar_bytes);
if ( hvm_long_mode_enabled(v) &&
switch ( _event.vector | -(_event.type == X86_EVENTTYPE_SW_INTERRUPT) )
{
case TRAP_debug:
- if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
+ if ( guest_cpu_user_regs()->_eflags & X86_EFLAGS_TF )
{
__restore_debug_registers(curr);
write_debugreg(6, read_debugreg(6) | DR_STEP);
*/
__vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
if ( v->domain->debugger_attached &&
- (v->arch.user_regs.eflags & X86_EFLAGS_TF) &&
+ (v->arch.user_regs._eflags & X86_EFLAGS_TF) &&
(intr_shadow & VMX_INTR_SHADOW_STI) )
{
intr_shadow &= ~VMX_INTR_SHADOW_STI;
struct cpu_user_regs *regs = guest_cpu_user_regs();
unsigned long x;
- regs->eip += get_instruction_length(); /* Safe: callers audited */
- regs->eflags &= ~X86_EFLAGS_RF;
+ regs->rip += get_instruction_length(); /* Safe: callers audited */
+ regs->_eflags &= ~X86_EFLAGS_RF;
__vmread(GUEST_INTERRUPTIBILITY_INFO, &x);
if ( x & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) )
{
    x &= ~(VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS);
    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, x);
}
- if ( regs->eflags & X86_EFLAGS_TF )
+ if ( regs->_eflags & X86_EFLAGS_TF )
hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
}
return 1; /* Don't advance the guest IP! */
}
- eax = regs->eax;
- ebx = regs->ebx;
- ecx = regs->ecx;
- edx = regs->edx;
+ eax = regs->_eax;
+ ebx = regs->_ebx;
+ ecx = regs->_ecx;
+ edx = regs->_edx;
- leaf = regs->eax;
- subleaf = regs->ecx;
+ leaf = regs->_eax;
+ subleaf = regs->_ecx;
hvm_cpuid(leaf, &eax, &ebx, &ecx, &edx);
HVMTRACE_5D(CPUID, leaf, eax, ebx, ecx, edx);
- regs->eax = eax;
- regs->ebx = ebx;
- regs->ecx = ecx;
- regs->edx = edx;
+ regs->rax = eax;
+ regs->rbx = ebx;
+ regs->rcx = ecx;
+ regs->rdx = edx;
return hvm_monitor_cpuid(get_instruction_length(), leaf, subleaf);
}
/* Adjust RFLAGS to enter virtual 8086 mode with IOPL == 3. Since
* we have CR4.VME == 1 and our own TSS with an empty interrupt
* redirection bitmap, all software INTs will be handled by vm86 */
- v->arch.hvm_vmx.vm86_saved_eflags = regs->eflags;
- regs->eflags |= (X86_EFLAGS_VM | X86_EFLAGS_IOPL);
+ v->arch.hvm_vmx.vm86_saved_eflags = regs->_eflags;
+ regs->_eflags |= (X86_EFLAGS_VM | X86_EFLAGS_IOPL);
}
static int vmx_handle_eoi_write(void)
if ( hvm_long_mode_enabled(v) )
HVMTRACE_ND(VMEXIT64, 0, 1/*cycles*/, 3, exit_reason,
- (uint32_t)regs->eip, (uint32_t)((uint64_t)regs->eip >> 32),
- 0, 0, 0);
+ regs->_eip, regs->rip >> 32, 0, 0, 0);
else
HVMTRACE_ND(VMEXIT, 0, 1/*cycles*/, 2, exit_reason,
- (uint32_t)regs->eip,
- 0, 0, 0, 0);
+ regs->_eip, 0, 0, 0, 0);
perfc_incra(vmexits, exit_reason);
if ( v->arch.hvm_vmx.vmx_realmode )
{
/* Put RFLAGS back the way the guest wants it */
- regs->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IOPL);
- regs->eflags |= (v->arch.hvm_vmx.vm86_saved_eflags & X86_EFLAGS_IOPL);
+ regs->_eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IOPL);
+ regs->_eflags |= (v->arch.hvm_vmx.vm86_saved_eflags & X86_EFLAGS_IOPL);
/* Unless this exit was for an interrupt, we've hit something
* vm86 can't handle. Try again, using the emulator. */
if ( trap_type >= X86_EVENTTYPE_SW_INTERRUPT )
__vmread(VM_EXIT_INSTRUCTION_LEN, &insn_len);
- rc = hvm_monitor_debug(regs->eip,
+ rc = hvm_monitor_debug(regs->rip,
HVM_MONITOR_DEBUG_EXCEPTION,
trap_type, insn_len);
int rc;
__vmread(VM_EXIT_INSTRUCTION_LEN, &insn_len);
- rc = hvm_monitor_debug(regs->eip,
+ rc = hvm_monitor_debug(regs->rip,
HVM_MONITOR_SOFTWARE_BREAKPOINT,
X86_EVENTTYPE_SW_EXCEPTION,
insn_len);
HVM_DBG_LOG(DBG_LEVEL_VMMU,
"eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
- (unsigned long)regs->eax, (unsigned long)regs->ebx,
- (unsigned long)regs->ecx, (unsigned long)regs->edx,
- (unsigned long)regs->esi, (unsigned long)regs->edi);
+ regs->rax, regs->rbx, regs->rcx,
+ regs->rdx, regs->rsi, regs->rdi);
if ( paging_fault(exit_qualification, regs) )
{
__vmread(IDT_VECTORING_ERROR_CODE, &ecode);
else
ecode = -1;
- regs->eip += inst_len;
+ regs->rip += inst_len;
hvm_task_switch((uint16_t)exit_qualification, reasons[source], ecode);
break;
}
}
case EXIT_REASON_HLT:
update_guest_eip(); /* Safe: HLT */
- hvm_hlt(regs->eflags);
+ hvm_hlt(regs->_eflags);
break;
case EXIT_REASON_INVLPG:
update_guest_eip(); /* Safe: INVLPG */
vmx_invlpg_intercept(exit_qualification);
break;
case EXIT_REASON_RDTSCP:
- regs->ecx = hvm_msr_tsc_aux(v);
+ regs->rcx = hvm_msr_tsc_aux(v);
/* fall through */
case EXIT_REASON_RDTSC:
update_guest_eip(); /* Safe: RDTSC, RDTSCP */
case EXIT_REASON_VMCALL:
{
int rc;
- HVMTRACE_1D(VMMCALL, regs->eax);
+ HVMTRACE_1D(VMMCALL, regs->_eax);
rc = hvm_do_hypercall(regs);
if ( rc != HVM_HCALL_preempted )
{
vmx_update_cpu_exec_control(v);
if ( v->arch.hvm_vcpu.single_step )
{
- hvm_monitor_debug(regs->eip,
+ hvm_monitor_debug(regs->rip,
HVM_MONITOR_SINGLESTEP_BREAKPOINT,
0, 0);
static void vmsucceed(struct cpu_user_regs *regs)
{
- regs->eflags &= ~X86_EFLAGS_ARITH_MASK;
+ regs->_eflags &= ~X86_EFLAGS_ARITH_MASK;
}
static void vmfail_valid(struct cpu_user_regs *regs, enum vmx_insn_errno errno)
{
struct vcpu *v = current;
- unsigned long eflags = regs->eflags;
+ unsigned int eflags = regs->_eflags;
- regs->eflags = (eflags & ~X86_EFLAGS_ARITH_MASK) | X86_EFLAGS_ZF;
+ regs->_eflags = (eflags & ~X86_EFLAGS_ARITH_MASK) | X86_EFLAGS_ZF;
set_vvmcs(v, VM_INSTRUCTION_ERROR, errno);
}
static void vmfail_invalid(struct cpu_user_regs *regs)
{
- unsigned long eflags = regs->eflags;
+ unsigned int eflags = regs->_eflags;
- regs->eflags = (eflags & ~X86_EFLAGS_ARITH_MASK) | X86_EFLAGS_CF;
+ regs->_eflags = (eflags & ~X86_EFLAGS_ARITH_MASK) | X86_EFLAGS_CF;
}
static void vmfail(struct cpu_user_regs *regs, enum vmx_insn_errno errno)
!(v->arch.hvm_vcpu.guest_efer & EFER_LMA) )
vvmcs_to_shadow_bulk(v, ARRAY_SIZE(gpdpte_fields), gpdpte_fields);
- regs->eip = get_vvmcs(v, GUEST_RIP);
- regs->esp = get_vvmcs(v, GUEST_RSP);
- regs->eflags = get_vvmcs(v, GUEST_RFLAGS);
+ regs->rip = get_vvmcs(v, GUEST_RIP);
+ regs->rsp = get_vvmcs(v, GUEST_RSP);
+ regs->rflags = get_vvmcs(v, GUEST_RFLAGS);
/* updating host cr0 to sync TS bit */
__vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
shadow_to_vvmcs_bulk(v, ARRAY_SIZE(vmcs_gstate_field),
vmcs_gstate_field);
/* RIP, RSP are in user regs */
- set_vvmcs(v, GUEST_RIP, regs->eip);
- set_vvmcs(v, GUEST_RSP, regs->esp);
+ set_vvmcs(v, GUEST_RIP, regs->rip);
+ set_vvmcs(v, GUEST_RSP, regs->rsp);
/* CR3 sync if exec doesn't want cr3 load exiting: i.e. nested EPT */
if ( !(__n2_exec_control(v) & CPU_BASED_CR3_LOAD_EXITING) )
if ( lm_l1 != lm_l2 )
paging_update_paging_modes(v);
- regs->eip = get_vvmcs(v, HOST_RIP);
- regs->esp = get_vvmcs(v, HOST_RSP);
+ regs->rip = get_vvmcs(v, HOST_RIP);
+ regs->rsp = get_vvmcs(v, HOST_RSP);
/* VM exit clears all bits except bit 1 */
- regs->eflags = 0x2;
+ regs->rflags = X86_EFLAGS_MBS;
/* updating host cr0 to sync TS bit */
__vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
ctrl = __n2_exec_control(v);
if ( ctrl & CPU_BASED_ACTIVATE_MSR_BITMAP )
{
- status = vmx_check_msr_bitmap(nvmx->msrbitmap, regs->ecx,
+ status = vmx_check_msr_bitmap(nvmx->msrbitmap, regs->_ecx,
!!(exit_reason == EXIT_REASON_MSR_WRITE));
if ( status )
nvcpu->nv_vmexit_pending = 1;
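
Note on the register naming used throughout these hunks: the rewrite presumes a cpu_user_regs layout in which each 64-bit register field overlays a 32-bit low-half alias, so the r-named member (rax, rip, rflags, ...) accesses the whole register while the underscore-prefixed member (_eax, _eip, _eflags, ...) accesses only the low 32 bits. The following is a minimal, hypothetical sketch of such a declaration, not the verbatim Xen header; the field names simply mirror the ones used above.

    #include <stdint.h>

    /* One 64-bit register overlaid with its 32-bit low half (anonymous union, C11). */
    #define DECL_REG(name) union {                                  \
            uint64_t r  ## name;  /* whole register, e.g. rax    */ \
            uint32_t _e ## name;  /* low 32 bits only, e.g. _eax */ \
        }

    struct cpu_user_regs {
        DECL_REG(ax);
        DECL_REG(bx);
        DECL_REG(cx);
        DECL_REG(dx);
        DECL_REG(si);
        DECL_REG(di);
        DECL_REG(ip);      /* rip / _eip */
        DECL_REG(flags);   /* rflags / _eflags */
        /* ... remaining registers, segment selectors, error code ... */
    };

With such a layout, an assignment like regs->rax = eax (where eax is a 32-bit local) stores the value zero-extended into the full register, matching the architectural zero-extension performed by CPUID and RDTSCP, whereas writing regs->_eax would leave the upper half of %rax stale. Conversely, regs->_eflags and regs->_eip are used where only the low 32 bits are architecturally meaningful, and regs->rip, regs->rsp, and regs->rflags are used where the full 64-bit value matters.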