static void vmx_set_host_env(struct vcpu *v)
{
- unsigned int tr, cpu, error = 0;
+ unsigned int tr, cpu;
struct host_execution_env host_env;
struct Xgt_desc_struct desc;
__asm__ __volatile__ ("sidt (%0) \n" :: "a"(&desc) : "memory");
host_env.idtr_limit = desc.size;
host_env.idtr_base = desc.address;
- error |= __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);
+ __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);
__asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory");
host_env.gdtr_limit = desc.size;
host_env.gdtr_base = desc.address;
- error |= __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);
+ __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);
__asm__ __volatile__ ("str (%0) \n" :: "a"(&tr) : "memory");
host_env.tr_selector = tr;
host_env.tr_limit = sizeof(struct tss_struct);
host_env.tr_base = (unsigned long) &init_tss[cpu];
- error |= __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
- error |= __vmwrite(HOST_TR_BASE, host_env.tr_base);
- error |= __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
+ __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
+ __vmwrite(HOST_TR_BASE, host_env.tr_base);
+ __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
}
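(For context: every conversion in this patch assumes the VMCS accessors themselves have been reworked so that __vmwrite() returns nothing and __vmread() returns the field value directly, with either one crashing the hypervisor if the underlying VMWRITE/VMREAD fails. The wrappers below are a minimal illustrative sketch of that assumed interface, not the patched header itself.)

/* Hedged sketch of the assumed new-style accessors (illustrative only). */
static inline void __vmwrite(unsigned long field, unsigned long value)
{
    asm volatile ( "vmwrite %1, %0\n\t"
                   "ja 1f\n\t"              /* CF==0 && ZF==0: success   */
                   "ud2\n"                  /* VMfail: crash immediately */
                   "1:"
                   :: "r" (field), "rm" (value) : "memory", "cc" );
}

static inline unsigned long __vmread(unsigned long field)
{
    unsigned long value;
    asm volatile ( "vmread %1, %0\n\t"
                   "ja 1f\n\t"              /* CF==0 && ZF==0: success   */
                   "ud2\n"                  /* VMfail: crash immediately */
                   "1:"
                   : "=rm" (value) : "r" (field) : "memory", "cc" );
    return value;
}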
-static int construct_vmcs(struct vcpu *v)
+static void construct_vmcs(struct vcpu *v)
{
- int error = 0;
- unsigned long tmp, cr0, cr4;
+ unsigned long cr0, cr4;
union vmcs_arbytes arbytes;
vmx_vmcs_enter(v);
/* VMCS controls. */
- error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
- error |= __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
- error |= __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
- error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
+ __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
+ __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
+ __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
+ __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
/* Host data selectors. */
- error |= __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
- error |= __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
- error |= __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
+ __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
+ __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
+ __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
#if defined(__i386__)
- error |= __vmwrite(HOST_FS_SELECTOR, __HYPERVISOR_DS);
- error |= __vmwrite(HOST_GS_SELECTOR, __HYPERVISOR_DS);
- error |= __vmwrite(HOST_FS_BASE, 0);
- error |= __vmwrite(HOST_GS_BASE, 0);
+ __vmwrite(HOST_FS_SELECTOR, __HYPERVISOR_DS);
+ __vmwrite(HOST_GS_SELECTOR, __HYPERVISOR_DS);
+ __vmwrite(HOST_FS_BASE, 0);
+ __vmwrite(HOST_GS_BASE, 0);
#elif defined(__x86_64__)
- rdmsrl(MSR_FS_BASE, tmp); error |= __vmwrite(HOST_FS_BASE, tmp);
- rdmsrl(MSR_GS_BASE, tmp); error |= __vmwrite(HOST_GS_BASE, tmp);
+ {
+ unsigned long msr;
+ rdmsrl(MSR_FS_BASE, msr); __vmwrite(HOST_FS_BASE, msr);
+ rdmsrl(MSR_GS_BASE, msr); __vmwrite(HOST_GS_BASE, msr);
+ }
#endif
/* Host control registers. */
- error |= __vmwrite(HOST_CR0, read_cr0());
- error |= __vmwrite(HOST_CR4, read_cr4());
+ __vmwrite(HOST_CR0, read_cr0());
+ __vmwrite(HOST_CR4, read_cr4());
/* Host CS:RIP. */
- error |= __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
- error |= __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler);
+ __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
+ __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler);
/* MSR intercepts. */
- error |= __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
- error |= __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);
- error |= __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
- error |= __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
- error |= __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
+ __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
+ __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);
+ __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
+ __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
+ __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
- error |= __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
+ __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
- error |= __vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
- error |= __vmwrite(CR4_GUEST_HOST_MASK, ~0UL);
+ __vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
+ __vmwrite(CR4_GUEST_HOST_MASK, ~0UL);
- error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
- error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
+ __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
+ __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
- error |= __vmwrite(CR3_TARGET_COUNT, 0);
+ __vmwrite(CR3_TARGET_COUNT, 0);
- error |= __vmwrite(GUEST_ACTIVITY_STATE, 0);
+ __vmwrite(GUEST_ACTIVITY_STATE, 0);
/* Guest segment bases. */
- error |= __vmwrite(GUEST_ES_BASE, 0);
- error |= __vmwrite(GUEST_SS_BASE, 0);
- error |= __vmwrite(GUEST_DS_BASE, 0);
- error |= __vmwrite(GUEST_FS_BASE, 0);
- error |= __vmwrite(GUEST_GS_BASE, 0);
- error |= __vmwrite(GUEST_CS_BASE, 0);
+ __vmwrite(GUEST_ES_BASE, 0);
+ __vmwrite(GUEST_SS_BASE, 0);
+ __vmwrite(GUEST_DS_BASE, 0);
+ __vmwrite(GUEST_FS_BASE, 0);
+ __vmwrite(GUEST_GS_BASE, 0);
+ __vmwrite(GUEST_CS_BASE, 0);
/* Guest segment limits. */
- error |= __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
- error |= __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
- error |= __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
- error |= __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
- error |= __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);
- error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);
+ __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
+ __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
+ __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
+ __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
+ __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);
+ __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);
/* Guest segment AR bytes. */
arbytes.bytes = 0;
arbytes.fields.default_ops_size = 1; /* 32-bit */
arbytes.fields.g = 1;
arbytes.fields.null_bit = 0; /* not null */
- error |= __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
- error |= __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
- error |= __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
- error |= __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
- error |= __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);
+ __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
+ __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
+ __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
+ __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
+ __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);
arbytes.fields.seg_type = 0xb; /* type = 0xb */
- error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);
+ __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);
/* Guest GDT. */
- error |= __vmwrite(GUEST_GDTR_BASE, 0);
- error |= __vmwrite(GUEST_GDTR_LIMIT, 0);
+ __vmwrite(GUEST_GDTR_BASE, 0);
+ __vmwrite(GUEST_GDTR_LIMIT, 0);
/* Guest IDT. */
- error |= __vmwrite(GUEST_IDTR_BASE, 0);
- error |= __vmwrite(GUEST_IDTR_LIMIT, 0);
+ __vmwrite(GUEST_IDTR_BASE, 0);
+ __vmwrite(GUEST_IDTR_LIMIT, 0);
/* Guest LDT and TSS. */
arbytes.fields.s = 0; /* not code or data segment */
arbytes.fields.seg_type = 0x2; /* LDT */
arbytes.fields.default_ops_size = 0; /* 16-bit */
arbytes.fields.g = 0;
- error |= __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);
+ __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);
arbytes.fields.seg_type = 0xb; /* 32-bit TSS (busy) */
- error |= __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);
+ __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);
- error |= __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
- __asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (tmp));
- error |= __vmwrite(GUEST_DR7, tmp);
- error |= __vmwrite(VMCS_LINK_POINTER, ~0UL);
+ __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
+ __vmwrite(GUEST_DR7, 0);
+ __vmwrite(VMCS_LINK_POINTER, ~0UL);
#if defined(__i386__)
- error |= __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
+ __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
#endif
- error |= __vmwrite(EXCEPTION_BITMAP,
- MONITOR_DEFAULT_EXCEPTION_BITMAP);
+ __vmwrite(EXCEPTION_BITMAP, MONITOR_DEFAULT_EXCEPTION_BITMAP);
/* Guest CR0. */
cr0 = read_cr0();
v->arch.hvm_vmx.cpu_cr0 = cr0;
- error |= __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
+ __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
v->arch.hvm_vmx.cpu_shadow_cr0 = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
- error |= __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+ __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
/* Guest CR4. */
cr4 = read_cr4();
- error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
+ __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
v->arch.hvm_vmx.cpu_shadow_cr4 =
cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
- error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+ __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
#ifdef __x86_64__
/* VLAPIC TPR optimisation. */
v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_TPR_SHADOW;
v->arch.hvm_vcpu.u.vmx.exec_control &=
~(CPU_BASED_CR8_STORE_EXITING | CPU_BASED_CR8_LOAD_EXITING);
- error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
- v->arch.hvm_vcpu.u.vmx.exec_control);
- error |= __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
- page_to_maddr(vcpu_vlapic(v)->regs_page));
- error |= __vmwrite(TPR_THRESHOLD, 0);
+ __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vcpu.u.vmx.exec_control);
+ __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
+ page_to_maddr(vcpu_vlapic(v)->regs_page));
+ __vmwrite(TPR_THRESHOLD, 0);
#endif
- error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
- error |= __vmwrite(GUEST_LDTR_BASE, 0);
- error |= __vmwrite(GUEST_LDTR_LIMIT, 0);
+ __vmwrite(GUEST_LDTR_SELECTOR, 0);
+ __vmwrite(GUEST_LDTR_BASE, 0);
+ __vmwrite(GUEST_LDTR_LIMIT, 0);
- error |= __vmwrite(GUEST_TR_BASE, 0);
- error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
+ __vmwrite(GUEST_TR_BASE, 0);
+ __vmwrite(GUEST_TR_LIMIT, 0xff);
shadow_update_paging_modes(v);
__vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
__vmwrite(HOST_CR3, v->arch.cr3);
vmx_vmcs_exit(v);
-
- return error;
}
int vmx_create_vmcs(struct vcpu *v)
return -ENOMEM;
__vmx_clear_vmcs(v);
-
- if ( construct_vmcs(v) != 0 )
- {
- vmx_free_vmcs(v->arch.hvm_vmx.vmcs);
- v->arch.hvm_vmx.vmcs = NULL;
- return -EINVAL;
- }
+
+ construct_vmcs(v);
return 0;
}
void vm_launch_fail(unsigned long eflags)
{
- unsigned long error;
- __vmread(VM_INSTRUCTION_ERROR, &error);
+ unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
printk("<vm_launch_fail> error code %lx\n", error);
__hvm_bug(guest_cpu_user_regs());
}
void vm_resume_fail(unsigned long eflags)
{
- unsigned long error;
- __vmread(VM_INSTRUCTION_ERROR, &error);
+ unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
printk("<vm_resume_fail> error code %lx\n", error);
__hvm_bug(guest_cpu_user_regs());
}
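(The VMCS field-dumping loop below also relies on a non-crashing read for fields that may not be supported on the current CPU. A minimal sketch of the assumed __vmread_safe() interface follows; the only property the caller depends on is that *error is 0 on success and non-zero on failure, matching the "rc == 0" test below.)

/* Hedged sketch: speculative VMREAD that reports failure instead of crashing. */
static inline unsigned long __vmread_safe(unsigned long field, int *error)
{
    unsigned long value = 0;       /* left at 0 if the VMREAD fails */
    unsigned char fail;
    asm volatile ( "vmread %2, %0\n\t"
                   "setbe %1"                  /* CF==1 or ZF==1: VMfail */
                   : "+rm" (value), "=qm" (fail)
                   : "r" (field) : "memory", "cc" );
    *error = fail;
    return value;
}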
{
uint32_t addr, j;
unsigned long val;
- int code;
+ int code, rc;
char *fmt[4] = {"0x%04lx ", "0x%016lx ", "0x%08lx ", "0x%016lx "};
char *err[4] = {"------ ", "------------------ ",
"---------- ", "------------------ "};
if (!(j&3))
printk("\n\t\t0x%08x: ", addr);
- if (!__vmread(addr, &val))
+ val = __vmread_safe(addr, &rc);
+ if (rc == 0)
printk(fmt[code], val);
else
printk("%s", err[code]);
/* XXX should it be GP fault */
domain_crash_synchronous();
- __vmread(GUEST_FS_BASE, &msr_content);
+ msr_content = __vmread(GUEST_FS_BASE);
break;
case MSR_GS_BASE:
if ( !(vmx_long_mode_enabled(v)) )
domain_crash_synchronous();
- __vmread(GUEST_GS_BASE, &msr_content);
+ msr_content = __vmread(GUEST_GS_BASE);
break;
case MSR_SHADOW_GS_BASE:
static inline void vmx_save_dr(struct vcpu *v)
{
- if ( v->arch.hvm_vcpu.flag_dr_dirty )
- {
- savedebug(&v->arch.guest_context, 0);
- savedebug(&v->arch.guest_context, 1);
- savedebug(&v->arch.guest_context, 2);
- savedebug(&v->arch.guest_context, 3);
- savedebug(&v->arch.guest_context, 6);
-
- v->arch.hvm_vcpu.flag_dr_dirty = 0;
-
- v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
- __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
- v->arch.hvm_vcpu.u.vmx.exec_control);
- }
+ if ( !v->arch.hvm_vcpu.flag_dr_dirty )
+ return;
+
+ /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
+ v->arch.hvm_vcpu.flag_dr_dirty = 0;
+ v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
+ __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vcpu.u.vmx.exec_control);
+
+ savedebug(&v->arch.guest_context, 0);
+ savedebug(&v->arch.guest_context, 1);
+ savedebug(&v->arch.guest_context, 2);
+ savedebug(&v->arch.guest_context, 3);
+ savedebug(&v->arch.guest_context, 6);
+ v->arch.guest_context.debugreg[7] = __vmread(GUEST_DR7);
}
static inline void __restore_debug_registers(struct vcpu *v)
loaddebug(&v->arch.guest_context, 3);
/* No 4 and 5 */
loaddebug(&v->arch.guest_context, 6);
- /* DR7 is loaded from the vmcs. */
+ /* DR7 is loaded from the VMCS. */
}
/*
* need to be restored if their value is going to affect execution -- i.e.,
* if one of the breakpoints is enabled. So mask out all bits that don't
* enable some breakpoint functionality.
- *
- * This is in part necessary because bit 10 of DR7 is hardwired to 1, so a
- * simple if( guest_dr7 ) will always return true. As long as we're masking,
- * we might as well do it right.
*/
#define DR7_ACTIVE_MASK 0xff
static inline void vmx_restore_dr(struct vcpu *v)
{
- unsigned long guest_dr7;
-
- __vmread(GUEST_DR7, &guest_dr7);
-
- /* Assumes guest does not have DR access at time of context switch. */
- if ( unlikely(guest_dr7 & DR7_ACTIVE_MASK) )
+ /* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
+ if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
__restore_debug_registers(v);
}
if ( regs != NULL )
{
- __vmread(GUEST_RFLAGS, &regs->eflags);
- __vmread(GUEST_SS_SELECTOR, &regs->ss);
- __vmread(GUEST_CS_SELECTOR, &regs->cs);
- __vmread(GUEST_DS_SELECTOR, &regs->ds);
- __vmread(GUEST_ES_SELECTOR, &regs->es);
- __vmread(GUEST_GS_SELECTOR, &regs->gs);
- __vmread(GUEST_FS_SELECTOR, &regs->fs);
- __vmread(GUEST_RIP, &regs->eip);
- __vmread(GUEST_RSP, &regs->esp);
+ regs->eflags = __vmread(GUEST_RFLAGS);
+ regs->ss = __vmread(GUEST_SS_SELECTOR);
+ regs->cs = __vmread(GUEST_CS_SELECTOR);
+ regs->ds = __vmread(GUEST_DS_SELECTOR);
+ regs->es = __vmread(GUEST_ES_SELECTOR);
+ regs->gs = __vmread(GUEST_GS_SELECTOR);
+ regs->fs = __vmread(GUEST_FS_SELECTOR);
+ regs->eip = __vmread(GUEST_RIP);
+ regs->esp = __vmread(GUEST_RSP);
}
if ( crs != NULL )
{
crs[0] = v->arch.hvm_vmx.cpu_shadow_cr0;
crs[2] = v->arch.hvm_vmx.cpu_cr2;
- __vmread(GUEST_CR3, &crs[3]);
+ crs[3] = __vmread(GUEST_CR3);
crs[4] = v->arch.hvm_vmx.cpu_shadow_cr4;
}
*/
static void fixup_vm86_seg_bases(struct cpu_user_regs *regs)
{
- int err = 0;
unsigned long base;
- err |= __vmread(GUEST_ES_BASE, &base);
+ base = __vmread(GUEST_ES_BASE);
if (regs->es << 4 != base)
- err |= __vmwrite(GUEST_ES_BASE, regs->es << 4);
- err |= __vmread(GUEST_CS_BASE, &base);
+ __vmwrite(GUEST_ES_BASE, regs->es << 4);
+ base = __vmread(GUEST_CS_BASE);
if (regs->cs << 4 != base)
- err |= __vmwrite(GUEST_CS_BASE, regs->cs << 4);
- err |= __vmread(GUEST_SS_BASE, &base);
+ __vmwrite(GUEST_CS_BASE, regs->cs << 4);
+ base = __vmread(GUEST_SS_BASE);
if (regs->ss << 4 != base)
- err |= __vmwrite(GUEST_SS_BASE, regs->ss << 4);
- err |= __vmread(GUEST_DS_BASE, &base);
+ __vmwrite(GUEST_SS_BASE, regs->ss << 4);
+ base = __vmread(GUEST_DS_BASE);
if (regs->ds << 4 != base)
- err |= __vmwrite(GUEST_DS_BASE, regs->ds << 4);
- err |= __vmread(GUEST_FS_BASE, &base);
+ __vmwrite(GUEST_DS_BASE, regs->ds << 4);
+ base = __vmread(GUEST_FS_BASE);
if (regs->fs << 4 != base)
- err |= __vmwrite(GUEST_FS_BASE, regs->fs << 4);
- err |= __vmread(GUEST_GS_BASE, &base);
+ __vmwrite(GUEST_FS_BASE, regs->fs << 4);
+ base = __vmread(GUEST_GS_BASE);
if (regs->gs << 4 != base)
- err |= __vmwrite(GUEST_GS_BASE, regs->gs << 4);
-
- BUG_ON(err);
+ __vmwrite(GUEST_GS_BASE, regs->gs << 4);
}
static void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
ASSERT(v == current);
- __vmread(GUEST_RFLAGS, &rflags);
+ rflags = __vmread(GUEST_RFLAGS);
return rflags & X86_EFLAGS_VM;
}
ASSERT(v == current);
- __vmread(GUEST_CS_AR_BYTES, &cs_ar_bytes);
+ cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
if ( vmx_long_mode_enabled(v) )
return ((cs_ar_bytes & (1u<<13)) ?
static int __get_instruction_length(void)
{
int len;
- __vmread(VM_EXIT_INSTRUCTION_LEN, &len); /* Safe: callers audited */
+ len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe: callers audited */
if ( (len < 1) || (len > 15) )
__hvm_bug(guest_cpu_user_regs());
return len;
{
unsigned long current_eip;
- __vmread(GUEST_RIP, &current_eip);
+ current_eip = __vmread(GUEST_RIP);
__vmwrite(GUEST_RIP, current_eip + inst_len);
__vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
}
{
unsigned long eip, cs;
- __vmread(GUEST_CS_BASE, &cs);
- __vmread(GUEST_RIP, &eip);
+ cs = __vmread(GUEST_CS_BASE);
+ eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_VMMU,
"vmx_do_page_fault = 0x%lx, cs_base=%lx, "
"eip = %lx, error_code = %lx\n",
#if 0
if ( !result )
{
- __vmread(GUEST_RIP, &eip);
+ eip = __vmread(GUEST_RIP);
printk("vmx pgfault to guest va=%lx eip=%lx\n", va, eip);
}
#endif
unsigned long eip;
struct vcpu *v = current;
- __vmread(GUEST_RIP, &eip);
+ eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_3, "(eax) 0x%08lx, (ebx) 0x%08lx, "
"(ecx) 0x%08lx, (edx) 0x%08lx, (esi) 0x%08lx, (edi) 0x%08lx",
unsigned long eip;
struct vcpu *v = current;
- __vmread(GUEST_RIP, &eip);
+ eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "eip=%lx, va=%lx",
eip, va);
/* INS can only use ES segment register, and it can't be overridden */
if ( dir == IOREQ_READ )
{
- __vmread(GUEST_ES_SELECTOR, &sel);
+ sel = __vmread(GUEST_ES_SELECTOR);
return sel == 0 ? 1 : 0;
}
case 0x67: /* addr32 */
continue;
case 0x2e: /* CS */
- __vmread(GUEST_CS_SELECTOR, &sel);
+ sel = __vmread(GUEST_CS_SELECTOR);
break;
case 0x36: /* SS */
- __vmread(GUEST_SS_SELECTOR, &sel);
+ sel = __vmread(GUEST_SS_SELECTOR);
break;
case 0x26: /* ES */
- __vmread(GUEST_ES_SELECTOR, &sel);
+ sel = __vmread(GUEST_ES_SELECTOR);
break;
case 0x64: /* FS */
- __vmread(GUEST_FS_SELECTOR, &sel);
+ sel = __vmread(GUEST_FS_SELECTOR);
break;
case 0x65: /* GS */
- __vmread(GUEST_GS_SELECTOR, &sel);
+ sel = __vmread(GUEST_GS_SELECTOR);
break;
case 0x3e: /* DS */
/* FALLTHROUGH */
default:
/* DS is the default */
- __vmread(GUEST_DS_SELECTOR, &sel);
+ sel = __vmread(GUEST_DS_SELECTOR);
}
return sel == 0 ? 1 : 0;
}
unsigned long addr, count = 1;
int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
- __vmread(GUEST_LINEAR_ADDRESS, &addr);
+ addr = __vmread(GUEST_LINEAR_ADDRESS);
/*
* In protected mode, guest linear address is invalid if the
}
}
-static int vmx_world_save(struct vcpu *v, struct vmx_assist_context *c)
+static void vmx_world_save(struct vcpu *v, struct vmx_assist_context *c)
{
- int error = 0;
-
/* NB. Skip transition instruction. */
- error |= __vmread(GUEST_RIP, &c->eip);
+ c->eip = __vmread(GUEST_RIP);
c->eip += __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
- error |= __vmread(GUEST_RSP, &c->esp);
- error |= __vmread(GUEST_RFLAGS, &c->eflags);
+ c->esp = __vmread(GUEST_RSP);
+ c->eflags = __vmread(GUEST_RFLAGS);
c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
c->cr3 = v->arch.hvm_vmx.cpu_cr3;
c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
- error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit);
- error |= __vmread(GUEST_IDTR_BASE, &c->idtr_base);
-
- error |= __vmread(GUEST_GDTR_LIMIT, &c->gdtr_limit);
- error |= __vmread(GUEST_GDTR_BASE, &c->gdtr_base);
-
- error |= __vmread(GUEST_CS_SELECTOR, &c->cs_sel);
- error |= __vmread(GUEST_CS_LIMIT, &c->cs_limit);
- error |= __vmread(GUEST_CS_BASE, &c->cs_base);
- error |= __vmread(GUEST_CS_AR_BYTES, &c->cs_arbytes.bytes);
-
- error |= __vmread(GUEST_DS_SELECTOR, &c->ds_sel);
- error |= __vmread(GUEST_DS_LIMIT, &c->ds_limit);
- error |= __vmread(GUEST_DS_BASE, &c->ds_base);
- error |= __vmread(GUEST_DS_AR_BYTES, &c->ds_arbytes.bytes);
-
- error |= __vmread(GUEST_ES_SELECTOR, &c->es_sel);
- error |= __vmread(GUEST_ES_LIMIT, &c->es_limit);
- error |= __vmread(GUEST_ES_BASE, &c->es_base);
- error |= __vmread(GUEST_ES_AR_BYTES, &c->es_arbytes.bytes);
-
- error |= __vmread(GUEST_SS_SELECTOR, &c->ss_sel);
- error |= __vmread(GUEST_SS_LIMIT, &c->ss_limit);
- error |= __vmread(GUEST_SS_BASE, &c->ss_base);
- error |= __vmread(GUEST_SS_AR_BYTES, &c->ss_arbytes.bytes);
-
- error |= __vmread(GUEST_FS_SELECTOR, &c->fs_sel);
- error |= __vmread(GUEST_FS_LIMIT, &c->fs_limit);
- error |= __vmread(GUEST_FS_BASE, &c->fs_base);
- error |= __vmread(GUEST_FS_AR_BYTES, &c->fs_arbytes.bytes);
-
- error |= __vmread(GUEST_GS_SELECTOR, &c->gs_sel);
- error |= __vmread(GUEST_GS_LIMIT, &c->gs_limit);
- error |= __vmread(GUEST_GS_BASE, &c->gs_base);
- error |= __vmread(GUEST_GS_AR_BYTES, &c->gs_arbytes.bytes);
-
- error |= __vmread(GUEST_TR_SELECTOR, &c->tr_sel);
- error |= __vmread(GUEST_TR_LIMIT, &c->tr_limit);
- error |= __vmread(GUEST_TR_BASE, &c->tr_base);
- error |= __vmread(GUEST_TR_AR_BYTES, &c->tr_arbytes.bytes);
-
- error |= __vmread(GUEST_LDTR_SELECTOR, &c->ldtr_sel);
- error |= __vmread(GUEST_LDTR_LIMIT, &c->ldtr_limit);
- error |= __vmread(GUEST_LDTR_BASE, &c->ldtr_base);
- error |= __vmread(GUEST_LDTR_AR_BYTES, &c->ldtr_arbytes.bytes);
-
- return !error;
+ c->idtr_limit = __vmread(GUEST_IDTR_LIMIT);
+ c->idtr_base = __vmread(GUEST_IDTR_BASE);
+
+ c->gdtr_limit = __vmread(GUEST_GDTR_LIMIT);
+ c->gdtr_base = __vmread(GUEST_GDTR_BASE);
+
+ c->cs_sel = __vmread(GUEST_CS_SELECTOR);
+ c->cs_limit = __vmread(GUEST_CS_LIMIT);
+ c->cs_base = __vmread(GUEST_CS_BASE);
+ c->cs_arbytes.bytes = __vmread(GUEST_CS_AR_BYTES);
+
+ c->ds_sel = __vmread(GUEST_DS_SELECTOR);
+ c->ds_limit = __vmread(GUEST_DS_LIMIT);
+ c->ds_base = __vmread(GUEST_DS_BASE);
+ c->ds_arbytes.bytes = __vmread(GUEST_DS_AR_BYTES);
+
+ c->es_sel = __vmread(GUEST_ES_SELECTOR);
+ c->es_limit = __vmread(GUEST_ES_LIMIT);
+ c->es_base = __vmread(GUEST_ES_BASE);
+ c->es_arbytes.bytes = __vmread(GUEST_ES_AR_BYTES);
+
+ c->ss_sel = __vmread(GUEST_SS_SELECTOR);
+ c->ss_limit = __vmread(GUEST_SS_LIMIT);
+ c->ss_base = __vmread(GUEST_SS_BASE);
+ c->ss_arbytes.bytes = __vmread(GUEST_SS_AR_BYTES);
+
+ c->fs_sel = __vmread(GUEST_FS_SELECTOR);
+ c->fs_limit = __vmread(GUEST_FS_LIMIT);
+ c->fs_base = __vmread(GUEST_FS_BASE);
+ c->fs_arbytes.bytes = __vmread(GUEST_FS_AR_BYTES);
+
+ c->gs_sel = __vmread(GUEST_GS_SELECTOR);
+ c->gs_limit = __vmread(GUEST_GS_LIMIT);
+ c->gs_base = __vmread(GUEST_GS_BASE);
+ c->gs_arbytes.bytes = __vmread(GUEST_GS_AR_BYTES);
+
+ c->tr_sel = __vmread(GUEST_TR_SELECTOR);
+ c->tr_limit = __vmread(GUEST_TR_LIMIT);
+ c->tr_base = __vmread(GUEST_TR_BASE);
+ c->tr_arbytes.bytes = __vmread(GUEST_TR_AR_BYTES);
+
+ c->ldtr_sel = __vmread(GUEST_LDTR_SELECTOR);
+ c->ldtr_limit = __vmread(GUEST_LDTR_LIMIT);
+ c->ldtr_base = __vmread(GUEST_LDTR_BASE);
+ c->ldtr_arbytes.bytes = __vmread(GUEST_LDTR_AR_BYTES);
}
-static int vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
+static void vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
{
unsigned long mfn, old_base_mfn;
- int error = 0;
- error |= __vmwrite(GUEST_RIP, c->eip);
- error |= __vmwrite(GUEST_RSP, c->esp);
- error |= __vmwrite(GUEST_RFLAGS, c->eflags);
+ __vmwrite(GUEST_RIP, c->eip);
+ __vmwrite(GUEST_RSP, c->esp);
+ __vmwrite(GUEST_RFLAGS, c->eflags);
v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
- error |= __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+ __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
- if (!vmx_paging_enabled(v))
+ if ( !vmx_paging_enabled(v) )
goto skip_cr3;
- if (c->cr3 == v->arch.hvm_vmx.cpu_cr3) {
+ if ( c->cr3 == v->arch.hvm_vmx.cpu_cr3 )
+ {
/*
* This is a simple TLB flush, implying the guest has
* removed some translation or changed page attributes.
* We simply invalidate the shadow.
*/
mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
- if (mfn != pagetable_get_pfn(v->arch.guest_table)) {
+ if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
+ {
printk("Invalid CR3 value=%x", c->cr3);
domain_crash_synchronous();
- return 0;
}
- } else {
+ }
+ else
+ {
/*
* If different, make a shadow. Check if the PDBR is valid
* first.
{
printk("Invalid CR3 value=%x", c->cr3);
domain_crash_synchronous();
- return 0;
}
- if(!get_page(mfn_to_page(mfn), v->domain))
- return 0;
+ if ( !get_page(mfn_to_page(mfn), v->domain) )
+ domain_crash_synchronous();
old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
v->arch.guest_table = pagetable_from_pfn(mfn);
if (old_base_mfn)
}
skip_cr3:
-
- if (!vmx_paging_enabled(v))
+ if ( !vmx_paging_enabled(v) )
HVM_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
else
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
- error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
+ __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
- error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
-
- error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
- error |= __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
-
- error |= __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
- error |= __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
-
- error |= __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
- error |= __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
- error |= __vmwrite(GUEST_CS_BASE, c->cs_base);
- error |= __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes.bytes);
-
- error |= __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
- error |= __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
- error |= __vmwrite(GUEST_DS_BASE, c->ds_base);
- error |= __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes.bytes);
-
- error |= __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
- error |= __vmwrite(GUEST_ES_LIMIT, c->es_limit);
- error |= __vmwrite(GUEST_ES_BASE, c->es_base);
- error |= __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes.bytes);
-
- error |= __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
- error |= __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
- error |= __vmwrite(GUEST_SS_BASE, c->ss_base);
- error |= __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes.bytes);
-
- error |= __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
- error |= __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
- error |= __vmwrite(GUEST_FS_BASE, c->fs_base);
- error |= __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes.bytes);
-
- error |= __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
- error |= __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
- error |= __vmwrite(GUEST_GS_BASE, c->gs_base);
- error |= __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes.bytes);
-
- error |= __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
- error |= __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
- error |= __vmwrite(GUEST_TR_BASE, c->tr_base);
- error |= __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes.bytes);
-
- error |= __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
- error |= __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
- error |= __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
- error |= __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
+ __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+
+ __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
+ __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
+
+ __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
+ __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
+
+ __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
+ __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
+ __vmwrite(GUEST_CS_BASE, c->cs_base);
+ __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes.bytes);
+
+ __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
+ __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
+ __vmwrite(GUEST_DS_BASE, c->ds_base);
+ __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes.bytes);
+
+ __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
+ __vmwrite(GUEST_ES_LIMIT, c->es_limit);
+ __vmwrite(GUEST_ES_BASE, c->es_base);
+ __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes.bytes);
+
+ __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
+ __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
+ __vmwrite(GUEST_SS_BASE, c->ss_base);
+ __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes.bytes);
+
+ __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
+ __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
+ __vmwrite(GUEST_FS_BASE, c->fs_base);
+ __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes.bytes);
+
+ __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
+ __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
+ __vmwrite(GUEST_GS_BASE, c->gs_base);
+ __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes.bytes);
+
+ __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
+ __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
+ __vmwrite(GUEST_TR_BASE, c->tr_base);
+ __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes.bytes);
+
+ __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
+ __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
+ __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
+ __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
shadow_update_paging_modes(v);
__vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
-
- return !error;
}
enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
if (hvm_copy_from_guest_phys(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp)))
goto error;
if (cp != 0) {
- if (!vmx_world_save(v, &c))
- goto error;
+ vmx_world_save(v, &c);
if (hvm_copy_to_guest_phys(cp, &c, sizeof(c)))
goto error;
}
if (cp != 0) {
if (hvm_copy_from_guest_phys(&c, cp, sizeof(c)))
goto error;
- if (!vmx_world_restore(v, &c))
- goto error;
+ vmx_world_restore(v, &c);
v->arch.hvm_vmx.vmxassist_enabled = 1;
return 1;
}
if (cp != 0) {
if (hvm_copy_from_guest_phys(&c, cp, sizeof(c)))
goto error;
- if (!vmx_world_restore(v, &c))
- goto error;
+ vmx_world_restore(v, &c);
v->arch.hvm_vmx.vmxassist_enabled = 0;
return 1;
}
HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode\n");
v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
|= EFER_LMA;
- __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
+ vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
vm_entry_value |= VM_ENTRY_IA32E_MODE;
__vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
}
{
v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
&= ~EFER_LMA;
- __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
+ vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
__vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
}
if ( vmx_assist(v, VMX_ASSIST_INVOKE) )
{
- __vmread(GUEST_RIP, &eip);
+ eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_1,
"Transfering control to vmxassist %%eip 0x%lx\n", eip);
return 0; /* do not update eip! */
}
else if ( v->arch.hvm_vmx.vmxassist_enabled )
{
- __vmread(GUEST_RIP, &eip);
+ eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_1,
"Enabling CR0.PE at %%eip 0x%lx\n", eip);
if ( vmx_assist(v, VMX_ASSIST_RESTORE) )
{
- __vmread(GUEST_RIP, &eip);
+ eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_1,
"Restoring to %%eip 0x%lx\n", eip);
return 0; /* do not update eip! */
{
v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
&= ~EFER_LMA;
- __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
+ vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
__vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
}
CASE_GET_REG(EDI, edi);
CASE_EXTEND_GET_REG;
case REG_ESP:
- __vmread(GUEST_RSP, &value);
+ value = __vmread(GUEST_RSP);
break;
default:
printk("invalid gp: %d\n", gp);
msr_content = hvm_get_guest_time(v);
break;
case MSR_IA32_SYSENTER_CS:
- __vmread(GUEST_SYSENTER_CS, (u32 *)&msr_content);
+ msr_content = (u32)__vmread(GUEST_SYSENTER_CS);
break;
case MSR_IA32_SYSENTER_ESP:
- __vmread(GUEST_SYSENTER_ESP, &msr_content);
+ msr_content = __vmread(GUEST_SYSENTER_ESP);
break;
case MSR_IA32_SYSENTER_EIP:
- __vmread(GUEST_SYSENTER_EIP, &msr_content);
+ msr_content = __vmread(GUEST_SYSENTER_EIP);
break;
case MSR_IA32_APICBASE:
msr_content = vcpu_vlapic(v)->apic_base_msr;
static void vmx_do_hlt(void)
{
unsigned long rflags;
- __vmread(GUEST_RFLAGS, &rflags);
+ rflags = __vmread(GUEST_RFLAGS);
hvm_hlt(rflags);
}
static inline void vmx_do_extint(struct cpu_user_regs *regs)
{
unsigned int vector;
- int error;
asmlinkage void do_IRQ(struct cpu_user_regs *);
fastcall void smp_apic_timer_interrupt(struct cpu_user_regs *);
fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs);
#endif
- if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
- && !(vector & INTR_INFO_VALID_MASK))
- __hvm_bug(regs);
+ vector = __vmread(VM_EXIT_INTR_INFO);
+ BUG_ON(!(vector & INTR_INFO_VALID_MASK));
vector &= INTR_INFO_VECTOR_MASK;
TRACE_VMEXIT(1, vector);
#if defined (__x86_64__)
void store_cpu_user_regs(struct cpu_user_regs *regs)
{
- __vmread(GUEST_SS_SELECTOR, &regs->ss);
- __vmread(GUEST_RSP, &regs->rsp);
- __vmread(GUEST_RFLAGS, &regs->rflags);
- __vmread(GUEST_CS_SELECTOR, &regs->cs);
- __vmread(GUEST_DS_SELECTOR, &regs->ds);
- __vmread(GUEST_ES_SELECTOR, &regs->es);
- __vmread(GUEST_RIP, &regs->rip);
+ regs->ss = __vmread(GUEST_SS_SELECTOR);
+ regs->rsp = __vmread(GUEST_RSP);
+ regs->rflags = __vmread(GUEST_RFLAGS);
+ regs->cs = __vmread(GUEST_CS_SELECTOR);
+ regs->ds = __vmread(GUEST_DS_SELECTOR);
+ regs->es = __vmread(GUEST_ES_SELECTOR);
+ regs->rip = __vmread(GUEST_RIP);
}
#elif defined (__i386__)
void store_cpu_user_regs(struct cpu_user_regs *regs)
{
- __vmread(GUEST_SS_SELECTOR, &regs->ss);
- __vmread(GUEST_RSP, &regs->esp);
- __vmread(GUEST_RFLAGS, &regs->eflags);
- __vmread(GUEST_CS_SELECTOR, &regs->cs);
- __vmread(GUEST_DS_SELECTOR, &regs->ds);
- __vmread(GUEST_ES_SELECTOR, &regs->es);
- __vmread(GUEST_RIP, &regs->eip);
+ regs->ss = __vmread(GUEST_SS_SELECTOR);
+ regs->esp = __vmread(GUEST_RSP);
+ regs->eflags = __vmread(GUEST_RFLAGS);
+ regs->cs = __vmread(GUEST_CS_SELECTOR);
+ regs->ds = __vmread(GUEST_DS_SELECTOR);
+ regs->es = __vmread(GUEST_ES_SELECTOR);
+ regs->eip = __vmread(GUEST_RIP);
}
#endif
#ifdef XEN_DEBUGGER
void save_cpu_user_regs(struct cpu_user_regs *regs)
{
- __vmread(GUEST_SS_SELECTOR, &regs->xss);
- __vmread(GUEST_RSP, &regs->esp);
- __vmread(GUEST_RFLAGS, &regs->eflags);
- __vmread(GUEST_CS_SELECTOR, &regs->xcs);
- __vmread(GUEST_RIP, &regs->eip);
-
- __vmread(GUEST_GS_SELECTOR, &regs->xgs);
- __vmread(GUEST_FS_SELECTOR, &regs->xfs);
- __vmread(GUEST_ES_SELECTOR, &regs->xes);
- __vmread(GUEST_DS_SELECTOR, &regs->xds);
+ regs->xss = __vmread(GUEST_SS_SELECTOR);
+ regs->esp = __vmread(GUEST_RSP);
+ regs->eflags = __vmread(GUEST_RFLAGS);
+ regs->xcs = __vmread(GUEST_CS_SELECTOR);
+ regs->eip = __vmread(GUEST_RIP);
+
+ regs->xgs = __vmread(GUEST_GS_SELECTOR);
+ regs->xfs = __vmread(GUEST_FS_SELECTOR);
+ regs->xes = __vmread(GUEST_ES_SELECTOR);
+ regs->xds = __vmread(GUEST_DS_SELECTOR);
}
void restore_cpu_user_regs(struct cpu_user_regs *regs)
{
int error_code, intr_info, vector;
- __vmread(VM_EXIT_INTR_INFO, &intr_info);
+ intr_info = __vmread(VM_EXIT_INTR_INFO);
vector = intr_info & 0xff;
if ( intr_info & INTR_INFO_DELIVER_CODE_MASK )
- __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
+ error_code = __vmread(VM_EXIT_INTR_ERROR_CODE);
else
error_code = VMX_DELIVER_NO_ERROR_CODE;
{
unsigned long rip;
- __vmread(GUEST_RIP, &rip);
+ rip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_1, "rip = %lx, error_code = %x",
rip, error_code);
}
unsigned long exit_qualification, inst_len = 0;
struct vcpu *v = current;
- __vmread(VM_EXIT_REASON, &exit_reason);
+ exit_reason = __vmread(VM_EXIT_REASON);
perfc_incra(vmexits, exit_reason);
{
unsigned int failed_vmentry_reason = exit_reason & 0xFFFF;
- __vmread(EXIT_QUALIFICATION, &exit_qualification);
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
printk("Failed vm entry (exit reason 0x%x) ", exit_reason);
switch ( failed_vmentry_reason ) {
case EXIT_REASON_INVALID_GUEST_STATE:
*/
unsigned int intr_info, vector;
- if ( __vmread(VM_EXIT_INTR_INFO, &intr_info) ||
- !(intr_info & INTR_INFO_VALID_MASK) )
- __hvm_bug(regs);
+ intr_info = __vmread(VM_EXIT_INTR_INFO);
+ BUG_ON(!(intr_info & INTR_INFO_VALID_MASK));
vector = intr_info & INTR_INFO_VECTOR_MASK;
}
case TRAP_page_fault:
{
- __vmread(EXIT_QUALIFICATION, &exit_qualification);
- __vmread(VM_EXIT_INTR_ERROR_CODE, &regs->error_code);
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
+ regs->error_code = __vmread(VM_EXIT_INTR_ERROR_CODE);
TRACE_VMEXIT(3, regs->error_code);
TRACE_VMEXIT(4, exit_qualification);
{
inst_len = __get_instruction_length(); /* Safe: INVLPG */
__update_guest_eip(inst_len);
- __vmread(EXIT_QUALIFICATION, &exit_qualification);
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
vmx_do_invlpg(exit_qualification);
TRACE_VMEXIT(4, exit_qualification);
break;
}
case EXIT_REASON_CR_ACCESS:
{
- __vmread(EXIT_QUALIFICATION, &exit_qualification);
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
inst_len = __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
if ( vmx_cr_access(exit_qualification, regs) )
__update_guest_eip(inst_len);
break;
}
case EXIT_REASON_DR_ACCESS:
- __vmread(EXIT_QUALIFICATION, &exit_qualification);
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
vmx_dr_access(exit_qualification, regs);
break;
case EXIT_REASON_IO_INSTRUCTION:
- __vmread(EXIT_QUALIFICATION, &exit_qualification);
+ exit_qualification = __vmread(EXIT_QUALIFICATION);
inst_len = __get_instruction_length(); /* Safe: IN, INS, OUT, OUTS */
vmx_io_instruction(exit_qualification, inst_len);
TRACE_VMEXIT(4, exit_qualification);