error |= __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
}
-/* Update CR3, CR0, CR4, GDT, LDT, TR */
+#if 0
static void vmx_do_launch(struct vcpu *v)
{
- unsigned int error = 0;
- unsigned long cr0, cr4;
-
if ( v->vcpu_id != 0 )
{
/* Sync AP's TSC with BSP's */
v->arch.hvm_vcpu.cache_tsc_offset =
v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
}
-
- __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );
-
- error |= __vmwrite(GUEST_CR0, cr0);
- cr0 &= ~X86_CR0_PG;
- error |= __vmwrite(CR0_READ_SHADOW, cr0);
- error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
- v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
-
- __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (cr4) : );
-
- error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
- cr4 &= ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
-
- error |= __vmwrite(CR4_READ_SHADOW, cr4);
-
- hvm_stts(v);
-
- if ( vlapic_init(v) == 0 )
- {
-#ifdef __x86_64__
- u32 *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
- u64 vapic_page_addr =
- page_to_maddr(v->arch.hvm_vcpu.vlapic->regs_page);
-
- *cpu_exec_control |= CPU_BASED_TPR_SHADOW;
- *cpu_exec_control &= ~CPU_BASED_CR8_STORE_EXITING;
- *cpu_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING;
- error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
- error |= __vmwrite(VIRTUAL_APIC_PAGE_ADDR, vapic_page_addr);
- error |= __vmwrite(TPR_THRESHOLD, 0);
-#endif
- }
-
- vmx_set_host_env(v);
- init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
-
- error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
- error |= __vmwrite(GUEST_LDTR_BASE, 0);
- error |= __vmwrite(GUEST_LDTR_LIMIT, 0);
-
- error |= __vmwrite(GUEST_TR_BASE, 0);
- error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
-
- shadow_update_paging_modes(v);
-
- __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
- __vmwrite(HOST_CR3, v->arch.cr3);
-
- v->arch.schedule_tail = arch_vmx_do_resume;
}
+#endif
static int construct_vmcs(struct vcpu *v)
{
int error = 0;
- unsigned long tmp;
+ unsigned long tmp, cr0, cr4;
union vmcs_arbytes arbytes;
vmx_vmcs_enter(v);
error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
error |= __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
error |= __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
+ error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
+ v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
/* Host data selectors. */
error |= __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
error |= __vmwrite(EXCEPTION_BITMAP,
MONITOR_DEFAULT_EXCEPTION_BITMAP);
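+ /*
+ * Cache guest CR0/CR4 and their read shadows in the vcpu structure so
+ * they can later be read without a VMREAD, even when this VMCS is not
+ * currently loaded.
+ */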
+ /* Guest CR0. */
+ cr0 = read_cr0();
+ v->arch.hvm_vmx.cpu_cr0 = cr0;
+ error |= __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
+ v->arch.hvm_vmx.cpu_shadow_cr0 = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
+ error |= __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+
+ /* Guest CR4. */
+ cr4 = read_cr4();
+ error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
+ v->arch.hvm_vmx.cpu_shadow_cr4 =
+ cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
+ error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+
+ /* XXX Move this out. */
+ init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
+ if ( vlapic_init(v) != 0 )
+ return -1;
+
+#ifdef __x86_64__
+ /* VLAPIC TPR optimisation. */
+ v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_TPR_SHADOW;
+ v->arch.hvm_vcpu.u.vmx.exec_control &=
+ ~(CPU_BASED_CR8_STORE_EXITING | CPU_BASED_CR8_LOAD_EXITING);
+ error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
+ v->arch.hvm_vcpu.u.vmx.exec_control);
+ error |= __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
+ page_to_maddr(v->arch.hvm_vcpu.vlapic->regs_page));
+ error |= __vmwrite(TPR_THRESHOLD, 0);
+#endif
+
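+ /* Initial guest LDTR and TR state: null LDT, minimal TSS at base 0. */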
+ error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
+ error |= __vmwrite(GUEST_LDTR_BASE, 0);
+ error |= __vmwrite(GUEST_LDTR_LIMIT, 0);
+
+ error |= __vmwrite(GUEST_TR_BASE, 0);
+ error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
+
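+ /* Set up the initial shadow pagetables, then load guest and host CR3. */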
+ shadow_update_paging_modes(v);
+ __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
+ __vmwrite(HOST_CR3, v->arch.cr3);
+
vmx_vmcs_exit(v);
return error;
}

reset_stack_and_jump(vmx_asm_do_vmentry);
}
-void arch_vmx_do_launch(struct vcpu *v)
-{
- vmx_load_vmcs(v);
- vmx_do_launch(v);
- reset_stack_and_jump(vmx_asm_do_vmentry);
-}
-
-
/* Dump a section of VMCS */
static void print_section(char *header, uint32_t start,
uint32_t end, int incr)
spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);
- v->arch.schedule_tail = arch_vmx_do_launch;
+ v->arch.schedule_tail = arch_vmx_do_resume;
v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
v->arch.ctxt_switch_to = vmx_ctxt_switch_to;
if ( crs != NULL )
{
- __vmread(CR0_READ_SHADOW, &crs[0]);
+ crs[0] = v->arch.hvm_vmx.cpu_shadow_cr0;
crs[2] = v->arch.hvm_vmx.cpu_cr2;
__vmread(GUEST_CR3, &crs[3]);
- __vmread(CR4_READ_SHADOW, &crs[4]);
+ crs[4] = v->arch.hvm_vmx.cpu_shadow_cr4;
}
vmx_vmcs_exit(v);
/* Make sure that xen intercepts any FP accesses from current */
static void vmx_stts(struct vcpu *v)
{
- unsigned long cr0;
-
/* VMX depends on operating on the current vcpu */
ASSERT(v == current);
* then this is not necessary: no FPU activity can occur until the guest
* clears CR0.TS, and we will initialise the FPU when that happens.
*/
- __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
- if ( !(cr0 & X86_CR0_TS) )
+ if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
{
- __vmread_vcpu(v, GUEST_CR0, &cr0);
- __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
+ v->arch.hvm_vmx.cpu_cr0 |= X86_CR0_TS;
+ __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
__vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
}
}
X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16);
}
+static int vmx_pae_enabled(struct vcpu *v)
+{
+ unsigned long cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
+ return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
+}
+
/* Setup HVM interfaces */
static void vmx_setup_hvm_funcs(void)
{
static void vmx_do_no_device_fault(void)
{
- unsigned long cr0;
struct vcpu *v = current;
setup_fpu(current);
__vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
/* Disable TS in guest CR0 unless the guest wants the exception too. */
- __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
- if ( !(cr0 & X86_CR0_TS) )
+ if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
{
- __vmread_vcpu(v, GUEST_CR0, &cr0);
- cr0 &= ~X86_CR0_TS;
- __vmwrite(GUEST_CR0, cr0);
+ v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS;
+ __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
}
}
error |= __vmread(GUEST_RSP, &c->esp);
error |= __vmread(GUEST_RFLAGS, &c->eflags);
- error |= __vmread(CR0_READ_SHADOW, &c->cr0);
+ c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
c->cr3 = v->arch.hvm_vmx.cpu_cr3;
- error |= __vmread(CR4_READ_SHADOW, &c->cr4);
+ c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit);
error |= __vmread(GUEST_IDTR_BASE, &c->idtr_base);
error |= __vmwrite(GUEST_RSP, c->esp);
error |= __vmwrite(GUEST_RFLAGS, c->eflags);
- error |= __vmwrite(CR0_READ_SHADOW, c->cr0);
+ v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
+ error |= __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
if (!vmx_paging_enabled(v))
goto skip_cr3;
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
- error |= __vmwrite(CR4_READ_SHADOW, c->cr4);
+ v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
+ error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
error |= __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
/*
* CR0: We don't want to lose PE and PG.
*/
- __vmread_vcpu(v, CR0_READ_SHADOW, &old_cr0);
+ old_cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
paging_enabled = (old_cr0 & X86_CR0_PE) && (old_cr0 & X86_CR0_PG);
/* TS cleared? Then initialise FPU now. */
__vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
}
- __vmwrite(GUEST_CR0, value | X86_CR0_PE | X86_CR0_PG | X86_CR0_NE);
- __vmwrite(CR0_READ_SHADOW, value);
+ v->arch.hvm_vmx.cpu_cr0 = value | X86_CR0_PE | X86_CR0_PG | X86_CR0_NE;
+ __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
+
+ v->arch.hvm_vmx.cpu_shadow_cr0 = value;
+ __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
}
case 4: /* CR4 */
{
- __vmread(CR4_READ_SHADOW, &old_cr);
+ old_cr = v->arch.hvm_vmx.cpu_shadow_cr4;
- if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
+ if ( (value & X86_CR4_PAE) && !(old_cr & X86_CR4_PAE) )
{
if ( vmx_pgbit_test(v) )
{
}
__vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
- __vmwrite(CR4_READ_SHADOW, value);
+ v->arch.hvm_vmx.cpu_shadow_cr4 = value;
+ __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
/*
* Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
setup_fpu(v);
__vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
- __vmread_vcpu(v, GUEST_CR0, &value);
- value &= ~X86_CR0_TS; /* clear TS */
- __vmwrite(GUEST_CR0, value);
+ v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS; /* clear TS */
+ __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
- __vmread_vcpu(v, CR0_READ_SHADOW, &value);
- value &= ~X86_CR0_TS; /* clear TS */
- __vmwrite(CR0_READ_SHADOW, value);
+ v->arch.hvm_vmx.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
+ __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
break;
case TYPE_LMSW:
- __vmread_vcpu(v, CR0_READ_SHADOW, &value);
+ value = v->arch.hvm_vmx.cpu_shadow_cr0;
value = (value & ~0xF) |
(((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
TRACE_VMEXIT(1, TYPE_LMSW);
// - changes in CR0.PG, CR4.PAE, CR4.PSE, or CR4.PGE
//
- // Avoid determining the current shadow mode for uninitialized CPUs, as
- // we can not yet determine whether it is an HVM or PV domain.
- //
- if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
- {
- SHADOW_PRINTK("%s: postponing determination of shadow mode\n", __func__);
- return;
- }
-
// First, tear down any old shadow tables held by this vcpu.
//
shadow_detach_old_tables(v);
v->arch.shadow.translate_enabled = !!hvm_paging_enabled(v);
if ( !v->arch.shadow.translate_enabled )
{
-
/* Set v->arch.guest_table to use the p2m map, and choose
* the appropriate shadow mode */
old_guest_table = pagetable_get_mfn(v->arch.guest_table);
sh_detach_old_tables(v);
- if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
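+ /*
+ * HVM vcpus have their paging mode set up while the VMCS is constructed,
+ * before _VCPUF_initialised is set, so only postpone for PV vcpus.
+ */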
+ if ( !is_hvm_domain(d) && !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
{
ASSERT(v->arch.cr3 == 0);
return;
unsigned long cpu_shadow_cr4; /* copy of guest read shadow CR4 */
unsigned long cpu_cr2; /* save CR2 */
unsigned long cpu_cr3;
- unsigned long cpu_based_exec_control;
struct vmx_msr_state msr_content;
unsigned long vmxassist_enabled:1;
};
extern void vmx_asm_do_vmentry(void);
extern void vmx_intr_assist(void);
extern void vmx_migrate_timers(struct vcpu *v);
-extern void arch_vmx_do_launch(struct vcpu *);
extern void arch_vmx_do_resume(struct vcpu *);
extern void set_guest_time(struct vcpu *v, u64 gtime);
return rc;
}
-
-static always_inline void __vmwrite_vcpu(
- struct vcpu *v, unsigned long field, unsigned long value)
-{
- switch ( field ) {
- case CR0_READ_SHADOW:
- v->arch.hvm_vmx.cpu_shadow_cr0 = value;
- break;
- case GUEST_CR0:
- v->arch.hvm_vmx.cpu_cr0 = value;
- break;
- case CR4_READ_SHADOW:
- v->arch.hvm_vmx.cpu_shadow_cr4 = value;
- break;
- case CPU_BASED_VM_EXEC_CONTROL:
- v->arch.hvm_vmx.cpu_based_exec_control = value;
- break;
- default:
- printk("__vmwrite_cpu: invalid field %lx\n", field);
- break;
- }
-}
-
-static always_inline void __vmread_vcpu(
- struct vcpu *v, unsigned long field, unsigned long *value)
-{
- switch ( field ) {
- case CR0_READ_SHADOW:
- *value = v->arch.hvm_vmx.cpu_shadow_cr0;
- break;
- case GUEST_CR0:
- *value = v->arch.hvm_vmx.cpu_cr0;
- break;
- case CR4_READ_SHADOW:
- *value = v->arch.hvm_vmx.cpu_shadow_cr4;
- break;
- case CPU_BASED_VM_EXEC_CONTROL:
- *value = v->arch.hvm_vmx.cpu_based_exec_control;
- break;
- default:
- printk("__vmread_vcpu: invalid field %lx\n", field);
- break;
- }
-}
-
static inline int __vmwrite(unsigned long field, unsigned long value)
{
- struct vcpu *v = current;
int rc;
__asm__ __volatile__ ( VMWRITE_OPCODE
MODRM_EAX_ECX
/* CF==1 or ZF==1 --> rc = -1 */
"setna %b0 ; neg %0"
: "=q" (rc) : "0" (0), "a" (field) , "c" (value)
: "memory");
- switch ( field ) {
- case CR0_READ_SHADOW:
- case GUEST_CR0:
- case CR4_READ_SHADOW:
- case CPU_BASED_VM_EXEC_CONTROL:
- __vmwrite_vcpu(v, field, value);
- break;
- }
-
return rc;
}
static inline int vmx_paging_enabled(struct vcpu *v)
{
- unsigned long cr0;
- __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
+ unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
return ((cr0 & (X86_CR0_PE|X86_CR0_PG)) == (X86_CR0_PE|X86_CR0_PG));
}
-static inline int vmx_pae_enabled(struct vcpu *v)
-{
- unsigned long cr4;
- __vmread_vcpu(v, CR4_READ_SHADOW, &cr4);
- return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
-}
-
static inline int vmx_long_mode_enabled(struct vcpu *v)
{
u64 efer = v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER];
static inline int vmx_pgbit_test(struct vcpu *v)
{
- unsigned long cr0;
-
- __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
+ unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
return (cr0 & X86_CR0_PG);
}