switch ( msr )
{
case MSR_FS_BASE:
- *msr_content = __vmread(GUEST_FS_BASE);
+ __vmread(GUEST_FS_BASE, msr_content);
break;
case MSR_GS_BASE:
- *msr_content = __vmread(GUEST_GS_BASE);
+ __vmread(GUEST_GS_BASE, msr_content);
break;
case MSR_SHADOW_GS_BASE:
static int vmx_guest_x86_mode(struct vcpu *v)
{
- unsigned int cs_ar_bytes;
+ unsigned long cs_ar_bytes;
if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
return 0;
if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
return 1;
- cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
+ __vmread(GUEST_CS_AR_BYTES, &cs_ar_bytes);
if ( hvm_long_mode_enabled(v) &&
likely(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
return 8;
v->arch.debugreg[3] = read_debugreg(3);
v->arch.debugreg[6] = read_debugreg(6);
/* DR7 must be saved as it is used by vmx_restore_dr(). */
- v->arch.debugreg[7] = __vmread(GUEST_DR7);
+ __vmread(GUEST_DR7, &v->arch.debugreg[7]);
}
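[ For reference: a minimal sketch of the two-argument __vmread() that the
  hunks in this patch convert every caller to. This is an illustrative
  assumption, not the actual Xen helper (which also handles VMREAD failure);
  the point is only that the field value is now stored through a
  caller-supplied pointer instead of being returned. ]

    static inline void __vmread(unsigned long field, unsigned long *value)
    {
        /* AT&T operand order: the field selector is the source, *value the destination. */
        asm volatile ( "vmread %1, %0"
                       : "=rm" (*value)
                       : "r" (field)
                       : "cc" );
    }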
static void __restore_debug_registers(struct vcpu *v)
static void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
{
- uint32_t ev;
+ unsigned long ev;
vmx_vmcs_enter(v);
c->msr_efer = v->arch.hvm_vcpu.guest_efer;
- c->sysenter_cs = __vmread(GUEST_SYSENTER_CS);
- c->sysenter_esp = __vmread(GUEST_SYSENTER_ESP);
- c->sysenter_eip = __vmread(GUEST_SYSENTER_EIP);
+ __vmread(GUEST_SYSENTER_CS, &c->sysenter_cs);
+ __vmread(GUEST_SYSENTER_ESP, &c->sysenter_esp);
+ __vmread(GUEST_SYSENTER_EIP, &c->sysenter_eip);
c->pending_event = 0;
c->error_code = 0;
- if ( ((ev = __vmread(VM_ENTRY_INTR_INFO)) & INTR_INFO_VALID_MASK) &&
+ __vmread(VM_ENTRY_INTR_INFO, &ev);
+ if ( (ev & INTR_INFO_VALID_MASK) &&
hvm_event_needs_reinjection((ev >> 8) & 7, ev & 0xff) )
{
c->pending_event = ev;
- c->error_code = __vmread(VM_ENTRY_EXCEPTION_ERROR_CODE);
+ __vmread(VM_ENTRY_EXCEPTION_ERROR_CODE, &ev);
+ c->error_code = ev;
}
vmx_vmcs_exit(v);
void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
struct segment_register *reg)
{
- uint32_t attr = 0;
+ unsigned long attr = 0, sel = 0, limit;
vmx_vmcs_enter(v);
switch ( seg )
{
case x86_seg_cs:
- reg->sel = __vmread(GUEST_CS_SELECTOR);
- reg->limit = __vmread(GUEST_CS_LIMIT);
- reg->base = __vmread(GUEST_CS_BASE);
- attr = __vmread(GUEST_CS_AR_BYTES);
+ __vmread(GUEST_CS_SELECTOR, &sel);
+ __vmread(GUEST_CS_LIMIT, &limit);
+ __vmread(GUEST_CS_BASE, &reg->base);
+ __vmread(GUEST_CS_AR_BYTES, &attr);
break;
case x86_seg_ds:
- reg->sel = __vmread(GUEST_DS_SELECTOR);
- reg->limit = __vmread(GUEST_DS_LIMIT);
- reg->base = __vmread(GUEST_DS_BASE);
- attr = __vmread(GUEST_DS_AR_BYTES);
+ __vmread(GUEST_DS_SELECTOR, &sel);
+ __vmread(GUEST_DS_LIMIT, &limit);
+ __vmread(GUEST_DS_BASE, &reg->base);
+ __vmread(GUEST_DS_AR_BYTES, &attr);
break;
case x86_seg_es:
- reg->sel = __vmread(GUEST_ES_SELECTOR);
- reg->limit = __vmread(GUEST_ES_LIMIT);
- reg->base = __vmread(GUEST_ES_BASE);
- attr = __vmread(GUEST_ES_AR_BYTES);
+ __vmread(GUEST_ES_SELECTOR, &sel);
+ __vmread(GUEST_ES_LIMIT, &limit);
+ __vmread(GUEST_ES_BASE, &reg->base);
+ __vmread(GUEST_ES_AR_BYTES, &attr);
break;
case x86_seg_fs:
- reg->sel = __vmread(GUEST_FS_SELECTOR);
- reg->limit = __vmread(GUEST_FS_LIMIT);
- reg->base = __vmread(GUEST_FS_BASE);
- attr = __vmread(GUEST_FS_AR_BYTES);
+ __vmread(GUEST_FS_SELECTOR, &sel);
+ __vmread(GUEST_FS_LIMIT, &limit);
+ __vmread(GUEST_FS_BASE, &reg->base);
+ __vmread(GUEST_FS_AR_BYTES, &attr);
break;
case x86_seg_gs:
- reg->sel = __vmread(GUEST_GS_SELECTOR);
- reg->limit = __vmread(GUEST_GS_LIMIT);
- reg->base = __vmread(GUEST_GS_BASE);
- attr = __vmread(GUEST_GS_AR_BYTES);
+ __vmread(GUEST_GS_SELECTOR, &sel);
+ __vmread(GUEST_GS_LIMIT, &limit);
+ __vmread(GUEST_GS_BASE, &reg->base);
+ __vmread(GUEST_GS_AR_BYTES, &attr);
break;
case x86_seg_ss:
- reg->sel = __vmread(GUEST_SS_SELECTOR);
- reg->limit = __vmread(GUEST_SS_LIMIT);
- reg->base = __vmread(GUEST_SS_BASE);
- attr = __vmread(GUEST_SS_AR_BYTES);
+ __vmread(GUEST_SS_SELECTOR, &sel);
+ __vmread(GUEST_SS_LIMIT, &limit);
+ __vmread(GUEST_SS_BASE, &reg->base);
+ __vmread(GUEST_SS_AR_BYTES, &attr);
break;
case x86_seg_tr:
- reg->sel = __vmread(GUEST_TR_SELECTOR);
- reg->limit = __vmread(GUEST_TR_LIMIT);
- reg->base = __vmread(GUEST_TR_BASE);
- attr = __vmread(GUEST_TR_AR_BYTES);
+ __vmread(GUEST_TR_SELECTOR, &sel);
+ __vmread(GUEST_TR_LIMIT, &limit);
+ __vmread(GUEST_TR_BASE, &reg->base);
+ __vmread(GUEST_TR_AR_BYTES, &attr);
break;
case x86_seg_gdtr:
- reg->limit = __vmread(GUEST_GDTR_LIMIT);
- reg->base = __vmread(GUEST_GDTR_BASE);
+ __vmread(GUEST_GDTR_LIMIT, &limit);
+ __vmread(GUEST_GDTR_BASE, &reg->base);
break;
case x86_seg_idtr:
- reg->limit = __vmread(GUEST_IDTR_LIMIT);
- reg->base = __vmread(GUEST_IDTR_BASE);
+ __vmread(GUEST_IDTR_LIMIT, &limit);
+ __vmread(GUEST_IDTR_BASE, &reg->base);
break;
case x86_seg_ldtr:
- reg->sel = __vmread(GUEST_LDTR_SELECTOR);
- reg->limit = __vmread(GUEST_LDTR_LIMIT);
- reg->base = __vmread(GUEST_LDTR_BASE);
- attr = __vmread(GUEST_LDTR_AR_BYTES);
+ __vmread(GUEST_LDTR_SELECTOR, &sel);
+ __vmread(GUEST_LDTR_LIMIT, &limit);
+ __vmread(GUEST_LDTR_BASE, &reg->base);
+ __vmread(GUEST_LDTR_AR_BYTES, &attr);
break;
default:
BUG();
+ return;
}
vmx_vmcs_exit(v);
+ reg->sel = sel;
+ reg->limit = limit;
+
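+ /* VMCS access-rights bytes leave bits 8-11 reserved; fold bits 12-15 (AVL/L/D-B/G) down beside the low byte. */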
reg->attr.bytes = (attr & 0xff) | ((attr >> 4) & 0xf00);
/* Unusable flag is folded into Present flag. */
if ( attr & (1u<<16) )
return 0;
vmx_vmcs_enter(v);
- *gpat = __vmread(GUEST_PAT);
+ __vmread(GUEST_PAT, gpat);
vmx_vmcs_exit(v);
return 1;
}
static unsigned int vmx_get_interrupt_shadow(struct vcpu *v)
{
- return __vmread(GUEST_INTERRUPTIBILITY_INFO);
+ unsigned long intr_shadow;
+
+ __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
+
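+ /* GUEST_INTERRUPTIBILITY_INFO is a 32-bit field; narrowing to the unsigned int return type is intentional. */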
+ return intr_shadow;
}
static void vmx_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
vmx_vmcs_enter(v);
- vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
+ __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
if ( v->arch.hvm_vcpu.guest_efer & EFER_LMA )
vm_entry_value |= VM_ENTRY_IA32E_MODE;
else
if ( nestedhvm_vcpu_in_guestmode(curr) )
intr_info = vcpu_2_nvmx(curr).intr.intr_info;
else
- intr_info = __vmread(VM_ENTRY_INTR_INFO);
+ __vmread(VM_ENTRY_INTR_INFO, &intr_info);
switch ( _trap.vector )
{
static int vmx_event_pending(struct vcpu *v)
{
+ unsigned long intr_info;
+
ASSERT(v == current);
- return (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK);
+ __vmread(VM_ENTRY_INTR_INFO, &intr_info);
+
+ return intr_info & INTR_INFO_VALID_MASK;
}
static void vmx_set_uc_mode(struct vcpu *v)
* to set the GUEST_PENDING_DBG_EXCEPTIONS.BS here incurs an
* immediate vmexit and hence makes no progress.
*/
- intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
+ __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
if ( v->domain->debugger_attached &&
(v->arch.user_regs.eflags & X86_EFLAGS_TF) &&
(intr_shadow & VMX_INTR_SHADOW_STI) )
isr = 0;
vmx_vmcs_enter(v);
- status = __vmread(GUEST_INTR_STATUS);
+ __vmread(GUEST_INTR_STATUS, &status);
old = status >> VMX_GUEST_INTR_STATUS_SVI_OFFSET;
if ( isr != old )
{
static void vmx_handle_eoi(u8 vector)
{
- unsigned long status = __vmread(GUEST_INTR_STATUS);
+ unsigned long status;
/* We need to clear the SVI field. */
+ __vmread(GUEST_INTR_STATUS, &status);
status &= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
__vmwrite(GUEST_INTR_STATUS, status);
}
*/
static int get_instruction_length(void)
{
- int len;
- len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe: callers audited */
+ unsigned long len;
+
+ __vmread(VM_EXIT_INSTRUCTION_LEN, &len); /* Safe: callers audited */
BUG_ON((len < 1) || (len > 15));
return len;
}
regs->eip += get_instruction_length(); /* Safe: callers audited */
regs->eflags &= ~X86_EFLAGS_RF;
- x = __vmread(GUEST_INTERRUPTIBILITY_INFO);
+ __vmread(GUEST_INTERRUPTIBILITY_INFO, &x);
if ( x & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) )
{
x &= ~(VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS);
switch ( msr )
{
case MSR_IA32_SYSENTER_CS:
- *msr_content = (u32)__vmread(GUEST_SYSENTER_CS);
+ __vmread(GUEST_SYSENTER_CS, msr_content);
break;
case MSR_IA32_SYSENTER_ESP:
- *msr_content = __vmread(GUEST_SYSENTER_ESP);
+ __vmread(GUEST_SYSENTER_ESP, msr_content);
break;
case MSR_IA32_SYSENTER_EIP:
- *msr_content = __vmread(GUEST_SYSENTER_EIP);
+ __vmread(GUEST_SYSENTER_EIP, msr_content);
break;
case MSR_IA32_DEBUGCTLMSR:
- *msr_content = __vmread(GUEST_IA32_DEBUGCTL);
+ __vmread(GUEST_IA32_DEBUGCTL, msr_content);
break;
case IA32_FEATURE_CONTROL_MSR:
case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_TRUE_ENTRY_CTLS:
static void vmx_do_extint(struct cpu_user_regs *regs)
{
- unsigned int vector;
+ unsigned long vector;
- vector = __vmread(VM_EXIT_INTR_INFO);
+ __vmread(VM_EXIT_INTR_INFO, &vector);
BUG_ON(!(vector & INTR_INFO_VALID_MASK));
vector &= INTR_INFO_VECTOR_MASK;
__trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
}
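+ /* GUEST_LINEAR_ADDRESS is valid only when the GLA-valid bit is set in the exit qualification. */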
+ if ( qualification & EPT_GLA_VALID )
+ __vmread(GUEST_LINEAR_ADDRESS, &gla);
+ else
+ gla = ~0ull;
ret = hvm_hap_nested_page_fault(gpa,
- qualification & EPT_GLA_VALID ? 1 : 0,
- qualification & EPT_GLA_VALID
- ? __vmread(GUEST_LINEAR_ADDRESS) : ~0ull,
- qualification & EPT_READ_VIOLATION ? 1 : 0,
- qualification & EPT_WRITE_VIOLATION ? 1 : 0,
- qualification & EPT_EXEC_VIOLATION ? 1 : 0);
+ !!(qualification & EPT_GLA_VALID), gla,
+ !!(qualification & EPT_READ_VIOLATION),
+ !!(qualification & EPT_WRITE_VIOLATION),
+ !!(qualification & EPT_EXEC_VIOLATION));
switch ( ret )
{
case 0: // Unhandled L1 EPT violation
ept_walk_table(d, gfn);
if ( qualification & EPT_GLA_VALID )
- {
- gla = __vmread(GUEST_LINEAR_ADDRESS);
gdprintk(XENLOG_ERR, " --- GLA %#lx\n", gla);
- }
domain_crash(d);
}
struct cpu_user_regs *regs)
{
unsigned int failed_vmentry_reason = (uint16_t)exit_reason;
- unsigned long exit_qualification = __vmread(EXIT_QUALIFICATION);
+ unsigned long exit_qualification;
struct vcpu *curr = current;
printk("Failed vm entry (exit reason %#x) ", exit_reason);
+ __vmread(EXIT_QUALIFICATION, &exit_qualification);
switch ( failed_vmentry_reason )
{
case EXIT_REASON_INVALID_GUEST_STATE:
static int vmx_handle_eoi_write(void)
{
- unsigned long exit_qualification = __vmread(EXIT_QUALIFICATION);
+ unsigned long exit_qualification;
/*
* 1. Must be a linear access data write.
* 2. Data write must be to the EOI register.
*/
+ __vmread(EXIT_QUALIFICATION, &exit_qualification);
if ( (((exit_qualification >> 12) & 0xf) == 1) &&
((exit_qualification & 0xfff) == APIC_EOI) )
{
__vmwrite(VM_ENTRY_INTR_INFO,
idtv_info & ~INTR_INFO_RESVD_BITS_MASK);
if ( idtv_info & INTR_INFO_DELIVER_CODE_MASK )
- __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
- __vmread(IDT_VECTORING_ERROR_CODE));
+ {
+ unsigned long ec;
+
+ __vmread(IDT_VECTORING_ERROR_CODE, &ec);
+ __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, ec);
+ }
}
/*
* Re-delivery will re-set it (see SDM 3B 25.7.1.2).
*/
if ( (idtv_info & INTR_INFO_INTR_TYPE_MASK) == (X86_EVENTTYPE_NMI<<8) )
+ {
+ unsigned long intr_info;
+
+ __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_info);
__vmwrite(GUEST_INTERRUPTIBILITY_INFO,
- __vmread(GUEST_INTERRUPTIBILITY_INFO) &
- ~VMX_INTR_SHADOW_NMI);
+ intr_info & ~VMX_INTR_SHADOW_NMI);
+ }
}
}
static int vmx_handle_apic_write(void)
{
- unsigned long exit_qualification = __vmread(EXIT_QUALIFICATION);
- unsigned int offset = exit_qualification & 0xfff;
+ unsigned long exit_qualification;
ASSERT(cpu_has_vmx_apic_reg_virt);
+ __vmread(EXIT_QUALIFICATION, &exit_qualification);
- return vlapic_apicv_write(current, offset);
+ return vlapic_apicv_write(current, exit_qualification & 0xfff);
}
/*
void vmx_vmexit_handler(struct cpu_user_regs *regs)
{
- unsigned int exit_reason, idtv_info, intr_info = 0, vector = 0;
- unsigned long exit_qualification, inst_len = 0;
+ unsigned long exit_qualification, exit_reason, idtv_info, intr_info = 0;
+ unsigned int vector = 0;
struct vcpu *v = current;
- regs->rip = __vmread(GUEST_RIP);
- regs->rsp = __vmread(GUEST_RSP);
- regs->rflags = __vmread(GUEST_RFLAGS);
+ __vmread(GUEST_RIP, &regs->rip);
+ __vmread(GUEST_RSP, &regs->rsp);
+ __vmread(GUEST_RFLAGS, &regs->rflags);
hvm_invalidate_regs_fields(regs);
if ( paging_mode_hap(v->domain) && hvm_paging_enabled(v) )
- v->arch.hvm_vcpu.guest_cr[3] = v->arch.hvm_vcpu.hw_cr[3] =
- __vmread(GUEST_CR3);
+ {
+ __vmread(GUEST_CR3, &v->arch.hvm_vcpu.hw_cr[3]);
+ v->arch.hvm_vcpu.guest_cr[3] = v->arch.hvm_vcpu.hw_cr[3];
+ }
- exit_reason = __vmread(VM_EXIT_REASON);
+ __vmread(VM_EXIT_REASON, &exit_reason);
if ( hvm_long_mode_enabled(v) )
HVMTRACE_ND(VMEXIT64, 0, 1/*cycles*/, 3, exit_reason,
vmx_do_extint(regs);
break;
case EXIT_REASON_EXCEPTION_NMI:
- intr_info = __vmread(VM_EXIT_INTR_INFO);
+ __vmread(VM_EXIT_INTR_INFO, &intr_info);
BUG_ON(!(intr_info & INTR_INFO_VALID_MASK));
vector = intr_info & INTR_INFO_VECTOR_MASK;
if ( vector == TRAP_machine_check )
hvm_maybe_deassert_evtchn_irq();
- idtv_info = __vmread(IDT_VECTORING_INFO);
+ __vmread(IDT_VECTORING_INFO, &idtv_info);
if ( !nestedhvm_vcpu_in_guestmode(v) &&
exit_reason != EXIT_REASON_TASK_SWITCH )
vmx_idtv_reinject(idtv_info);
switch ( exit_reason )
{
+ unsigned long ecode;
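+ /* Used by several cases below; a declaration ahead of the first case label is never reached as a statement. */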
+
case EXIT_REASON_EXCEPTION_NMI:
{
/*
if ( unlikely(intr_info & INTR_INFO_NMI_UNBLOCKED_BY_IRET) &&
!(idtv_info & INTR_INFO_VALID_MASK) &&
(vector != TRAP_double_fault) )
+ {
+ unsigned long guest_info;
+
+ __vmread(GUEST_INTERRUPTIBILITY_INFO, &guest_info);
__vmwrite(GUEST_INTERRUPTIBILITY_INFO,
- __vmread(GUEST_INTERRUPTIBILITY_INFO)
- | VMX_INTR_SHADOW_NMI);
+ guest_info | VMX_INTR_SHADOW_NMI);
+ }
perfc_incra(cause_vector, vector);
* Updates DR6 where debugger can peek (See 3B 23.2.1,
* Table 23-1, "Exit Qualification for Debug Exceptions").
*/
- exit_qualification = __vmread(EXIT_QUALIFICATION);
+ __vmread(EXIT_QUALIFICATION, &exit_qualification);
HVMTRACE_1D(TRAP_DEBUG, exit_qualification);
write_debugreg(6, exit_qualification | 0xffff0ff0);
if ( !v->domain->debugger_attached || cpu_has_monitor_trap_flag )
.vector = TRAP_int3,
.type = X86_EVENTTYPE_SW_EXCEPTION,
.error_code = HVM_DELIVER_NO_ERROR_CODE,
- .insn_len = __vmread(VM_EXIT_INSTRUCTION_LEN)
};
+ unsigned long insn_len;
+
+ __vmread(VM_EXIT_INSTRUCTION_LEN, &insn_len);
+ trap.insn_len = insn_len;
hvm_inject_trap(&trap);
break;
}
vmx_fpu_dirty_intercept();
break;
case TRAP_page_fault:
- exit_qualification = __vmread(EXIT_QUALIFICATION);
- regs->error_code = __vmread(VM_EXIT_INTR_ERROR_CODE);
+ __vmread(EXIT_QUALIFICATION, &exit_qualification);
+ __vmread(VM_EXIT_INTR_ERROR_CODE, &ecode);
+ regs->error_code = ecode;
HVM_DBG_LOG(DBG_LEVEL_VMMU,
"eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
static const enum hvm_task_switch_reason reasons[] = {
TSW_call_or_int, TSW_iret, TSW_jmp, TSW_call_or_int
};
- int32_t ecode = -1, source;
+ unsigned int inst_len, source;
- exit_qualification = __vmread(EXIT_QUALIFICATION);
+ __vmread(EXIT_QUALIFICATION, &exit_qualification);
source = (exit_qualification >> 30) & 3;
/* Vectored event should fill in interrupt information. */
WARN_ON((source == 3) && !(idtv_info & INTR_INFO_VALID_MASK));
(idtv_info & (1u<<10))) /* IntrType > 3? */
? get_instruction_length() /* Safe: SDM 3B 23.2.4 */ : 0;
if ( (source == 3) && (idtv_info & INTR_INFO_DELIVER_CODE_MASK) )
- ecode = __vmread(IDT_VECTORING_ERROR_CODE);
+ __vmread(IDT_VECTORING_ERROR_CODE, &ecode);
+ else
+ ecode = -1;
regs->eip += inst_len;
hvm_task_switch((uint16_t)exit_qualification, reasons[source], ecode);
break;
break;
case EXIT_REASON_INVLPG:
update_guest_eip(); /* Safe: INVLPG */
- exit_qualification = __vmread(EXIT_QUALIFICATION);
+ __vmread(EXIT_QUALIFICATION, &exit_qualification);
vmx_invlpg_intercept(exit_qualification);
break;
case EXIT_REASON_RDTSCP:
}
case EXIT_REASON_CR_ACCESS:
{
- exit_qualification = __vmread(EXIT_QUALIFICATION);
+ __vmread(EXIT_QUALIFICATION, &exit_qualification);
if ( vmx_cr_access(exit_qualification) == X86EMUL_OKAY )
update_guest_eip(); /* Safe: MOV Cn, LMSW, CLTS */
break;
}
case EXIT_REASON_DR_ACCESS:
- exit_qualification = __vmread(EXIT_QUALIFICATION);
+ __vmread(EXIT_QUALIFICATION, &exit_qualification);
vmx_dr_access(exit_qualification, regs);
break;
case EXIT_REASON_MSR_READ:
case EXIT_REASON_EOI_INDUCED:
{
int vector;
- exit_qualification = __vmread(EXIT_QUALIFICATION);
+
+ __vmread(EXIT_QUALIFICATION, &exit_qualification);
vector = exit_qualification & 0xff;
vmx_handle_EOI_induced_exit(vcpu_vlapic(current), vector);
}
case EXIT_REASON_IO_INSTRUCTION:
- exit_qualification = __vmread(EXIT_QUALIFICATION);
+ __vmread(EXIT_QUALIFICATION, &exit_qualification);
if ( exit_qualification & 0x10 )
{
/* INS, OUTS */
case EXIT_REASON_EPT_VIOLATION:
{
- paddr_t gpa = __vmread(GUEST_PHYSICAL_ADDRESS);
- exit_qualification = __vmread(EXIT_QUALIFICATION);
+ paddr_t gpa;
+
+ __vmread(GUEST_PHYSICAL_ADDRESS, &gpa);
+ __vmread(EXIT_QUALIFICATION, &exit_qualification);
ept_handle_violation(exit_qualification, gpa);
break;
}
/* fall through */
default:
exit_and_crash:
- gdprintk(XENLOG_ERR, "Bad vmexit (reason %#x)\n", exit_reason);
+ gdprintk(XENLOG_ERR, "Bad vmexit (reason %#lx)\n", exit_reason);
domain_crash(v->domain);
break;
}
if ( vmx_inst_check_privilege(regs, vmxon_check) != X86EMUL_OKAY )
return X86EMUL_EXCEPTION;
- info.word = __vmread(VMX_INSTRUCTION_INFO);
+ __vmread(VMX_INSTRUCTION_INFO, &offset);
+ info.word = offset;
if ( info.fields.memreg ) {
decode->type = VMX_INST_MEMREG_TYPE_REG;
scale = 1 << info.fields.scaling;
- disp = __vmread(EXIT_QUALIFICATION);
+ __vmread(EXIT_QUALIFICATION, &disp);
size = 1 << (info.fields.addr_size + 1);
virtual_vmcs_enter(vvmcs);
for ( i = 0; i < n; i++ )
- value[i] = __vmread(field[i]);
+ __vmread(field[i], &value[i]);
virtual_vmcs_exit(vvmcs);
for ( i = 0; i < n; i++ )
}
for ( i = 0; i < n; i++ )
- value[i] = __vmread(field[i]);
+ __vmread(field[i], &value[i]);
virtual_vmcs_enter(vvmcs);
for ( i = 0; i < n; i++ )
{
int rc;
unsigned long gfn;
- uint64_t exit_qual = __vmread(EXIT_QUALIFICATION);
+ uint64_t exit_qual;
uint32_t exit_reason = EXIT_REASON_EPT_VIOLATION;
uint32_t rwx_rights = (access_x << 2) | (access_w << 1) | access_r;
struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+ __vmread(EXIT_QUALIFICATION, &exit_qual);
rc = nept_translate_l2ga(v, L2_gpa, page_order, rwx_rights, &gfn, p2m_acc,
&exit_qual, &exit_reason);
switch ( rc )
struct vcpu *v = current;
struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
- unsigned int idtv_info = __vmread(IDT_VECTORING_INFO);
+ unsigned long idtv_info, reason;
+ __vmread(IDT_VECTORING_INFO, &idtv_info);
if ( likely(!(idtv_info & INTR_INFO_VALID_MASK)) )
return;
* If L0 can solve the fault that causes idt vectoring, it should
* be reinjected, otherwise, pass to L1.
*/
- if ( (__vmread(VM_EXIT_REASON) != EXIT_REASON_EPT_VIOLATION &&
- !(nvmx->intr.intr_info & INTR_INFO_VALID_MASK)) ||
- (__vmread(VM_EXIT_REASON) == EXIT_REASON_EPT_VIOLATION &&
- !nvcpu->nv_vmexit_pending) )
+ __vmread(VM_EXIT_REASON, &reason);
+ if ( reason != EXIT_REASON_EPT_VIOLATION ?
+ !(nvmx->intr.intr_info & INTR_INFO_VALID_MASK) :
+ !nvcpu->nv_vmexit_pending )
{
__vmwrite(VM_ENTRY_INTR_INFO, idtv_info & ~INTR_INFO_RESVD_BITS_MASK);
if ( idtv_info & INTR_INFO_DELIVER_CODE_MASK )
- __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
- __vmread(IDT_VECTORING_ERROR_CODE));
+ {
+ __vmread(IDT_VECTORING_ERROR_CODE, &reason);
+ __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, reason);
+ }
/*
* SDM 23.2.4, if L1 tries to inject a software interrupt
* and the delivery fails, VM_EXIT_INSTRUCTION_LEN receives
* This means EXIT_INSTRUCTION_LEN is always valid here, for
* software interrupts both injected by L1, and generated in L2.
*/
- __vmwrite(VM_ENTRY_INSTRUCTION_LEN, __vmread(VM_EXIT_INSTRUCTION_LEN));
+ __vmread(VM_EXIT_INSTRUCTION_LEN, &reason);
+ __vmwrite(VM_ENTRY_INSTRUCTION_LEN, reason);
}
}
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
u32 ctrl;
- u16 port;
u8 *bitmap;
nvcpu->nv_vmexit_pending = 0;
switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
{
- u32 intr_info = __vmread(VM_EXIT_INTR_INFO);
+ unsigned long intr_info;
u32 valid_mask = (X86_EVENTTYPE_HW_EXCEPTION << 8) |
INTR_INFO_VALID_MASK;
u64 exec_bitmap;
- int vector = intr_info & INTR_INFO_VECTOR_MASK;
+ int vector;
+ __vmread(VM_EXIT_INTR_INFO, &intr_info);
+ vector = intr_info & INTR_INFO_VECTOR_MASK;
/*
* decided by the L0 and L1 exception bitmaps: if the vector is set in
* both, L0 has priority on #PF and #NM, L1 has priority on the others
ctrl = __n2_exec_control(v);
if ( ctrl & CPU_BASED_ACTIVATE_IO_BITMAP )
{
- port = __vmread(EXIT_QUALIFICATION) >> 16;
+ unsigned long qual;
+ u16 port;
+
+ __vmread(EXIT_QUALIFICATION, &qual);
+ port = qual >> 16;
bitmap = nvmx->iobitmap[port >> 15];
if ( bitmap[(port & 0x7fff) >> 3] & (1 << (port & 0x7)) )
nvcpu->nv_vmexit_pending = 1;
break;
case EXIT_REASON_CR_ACCESS:
{
- u64 exit_qualification = __vmread(EXIT_QUALIFICATION);
- int cr = exit_qualification & 15;
- int write = (exit_qualification >> 4) & 3;
+ unsigned long exit_qualification;
+ int cr, write;
u32 mask = 0;
+ __vmread(EXIT_QUALIFICATION, &exit_qualification);
+ cr = exit_qualification & 0xf;
+ write = (exit_qualification >> 4) & 3;
/* also according to guest exec_control */
ctrl = __n2_exec_control(v);
{
u64 cr0_gh_mask = __get_vvmcs(nvcpu->nv_vvmcx, CR0_GUEST_HOST_MASK);
- old_val = __vmread(CR0_READ_SHADOW);
+ __vmread(CR0_READ_SHADOW, &old_val);
changed_bits = old_val ^ val;
if ( changed_bits & cr0_gh_mask )
nvcpu->nv_vmexit_pending = 1;
{
u64 cr4_gh_mask = __get_vvmcs(nvcpu->nv_vvmcx, CR4_GUEST_HOST_MASK);
- old_val = __vmread(CR4_READ_SHADOW);
+ __vmread(CR4_READ_SHADOW, &old_val);
changed_bits = old_val ^ val;
if ( changed_bits & cr4_gh_mask )
nvcpu->nv_vmexit_pending = 1;
{
u64 cr0_gh_mask = __get_vvmcs(nvcpu->nv_vvmcx, CR0_GUEST_HOST_MASK);
- old_val = __vmread(CR0_READ_SHADOW) & 0xf;
+ __vmread(CR0_READ_SHADOW, &old_val);
+ old_val &= 0xf;
val = (exit_qualification >> 16) & 0xf;
changed_bits = old_val ^ val;
if ( changed_bits & cr0_gh_mask )