unsigned long *store_mfn)
{
struct xen_domctl launch_domctl, domctl;
- int rc, i;
- vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
+ vcpu_guest_context_t ctxt;
+ int rc;
if ( (image == NULL) || (image_size == 0) )
{
goto error_out;
}
- if ( lock_pages(&st_ctxt, sizeof(st_ctxt) ) )
- {
- PERROR("%s: ctxt mlock failed", __func__);
- return 1;
- }
-
domctl.cmd = XEN_DOMCTL_getdomaininfo;
domctl.domain = (domid_t)domid;
if ( (xc_domctl(xc_handle, &domctl) < 0) ||
goto error_out;
}
- memset(ctxt, 0, sizeof(*ctxt));
+ memset(&ctxt, 0, sizeof(ctxt));
if ( setup_guest(xc_handle, domid, memsize, image, image_size,
- ctxt, domctl.u.getdomaininfo.shared_info_frame,
+ &ctxt, domctl.u.getdomaininfo.shared_info_frame,
vcpus, pae, acpi, store_evtchn, store_mfn) < 0)
{
ERROR("Error constructing guest OS");
goto error_out;
}
- /* FPU is set up to default initial state. */
- memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
-
- /* Virtual IDT is empty at start-of-day. */
- for ( i = 0; i < 256; i++ )
+ if ( lock_pages(&ctxt, sizeof(ctxt) ) )
{
- ctxt->trap_ctxt[i].vector = i;
- ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS;
+ PERROR("%s: ctxt mlock failed", __func__);
+ goto error_out;
}
- /* No LDT. */
- ctxt->ldt_ents = 0;
-
- /* Use the default Xen-provided GDT. */
- ctxt->gdt_ents = 0;
-
- /* No debugging. */
- memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));
-
- /* No callback handlers. */
-#if defined(__i386__)
- ctxt->event_callback_cs = FLAT_KERNEL_CS;
- ctxt->event_callback_eip = 0;
- ctxt->failsafe_callback_cs = FLAT_KERNEL_CS;
- ctxt->failsafe_callback_eip = 0;
-#elif defined(__x86_64__)
- ctxt->event_callback_eip = 0;
- ctxt->failsafe_callback_eip = 0;
- ctxt->syscall_callback_eip = 0;
-#endif
-
memset(&launch_domctl, 0, sizeof(launch_domctl));
-
launch_domctl.domain = (domid_t)domid;
launch_domctl.u.vcpucontext.vcpu = 0;
- set_xen_guest_handle(launch_domctl.u.vcpucontext.ctxt, ctxt);
-
+ set_xen_guest_handle(launch_domctl.u.vcpucontext.ctxt, &ctxt);
launch_domctl.cmd = XEN_DOMCTL_setvcpucontext;
rc = xc_domctl(xc_handle, &launch_domctl);
+ unlock_pages(&ctxt, sizeof(ctxt));
+
return rc;
error_out:
/* Ensure real hardware interrupts are enabled. */
v->arch.guest_context.user_regs.eflags |= EF_IE;
}
- else if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+ else
{
hvm_load_cpu_guest_regs(v, &v->arch.guest_context.user_regs);
}
}
-/* SVM-specific intitialization code for VCPU application processors */
-static void svm_init_ap_context(struct vcpu_guest_context *ctxt,
- int vcpuid, int trampoline_vector)
+static void svm_init_ap_context(
+ struct vcpu_guest_context *ctxt, int vcpuid, int trampoline_vector)
{
- int i;
- struct vcpu *v, *bsp = current;
- struct domain *d = bsp->domain;
- cpu_user_regs_t *regs;;
-
-
- if ((v = d->vcpu[vcpuid]) == NULL)
- {
- printk("vcpuid %d is invalid! good-bye.\n", vcpuid);
- domain_crash_synchronous();
- }
- regs = &v->arch.guest_context.user_regs;
-
memset(ctxt, 0, sizeof(*ctxt));
- for (i = 0; i < 256; ++i)
- {
- ctxt->trap_ctxt[i].vector = i;
- ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS;
- }
-
/*
* We execute the trampoline code in real mode. The trampoline vector
vmcb->rax = regs->eax;
vmcb->ss.sel = regs->ss;
vmcb->rsp = regs->esp;
- vmcb->rflags = regs->eflags;
+ vmcb->rflags = regs->eflags | 2UL;
vmcb->cs.sel = regs->cs;
vmcb->rip = regs->eip;
if (regs->eflags & EF_TF)
unsigned long crn;
segment_attributes_t attrib;
unsigned long dr7;
- unsigned long eflags;
unsigned long shadow_cr;
struct vmcb_struct *vmcb = arch_svm->vmcb;
vmcb->rsp = 0;
vmcb->rip = regs->eip;
- eflags = regs->eflags & ~HVM_EFLAGS_RESERVED_0; /* clear 0s */
- eflags |= HVM_EFLAGS_RESERVED_1; /* set 1s */
-
- vmcb->rflags = eflags;
+ vmcb->rflags = regs->eflags | 2UL; /* inc. reserved bit */
__asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (dr7));
vmcb->dr7 = dr7;
vmx_free_vmcs(vmcs);
}
-#define GUEST_LAUNCH_DS 0x08
-#define GUEST_LAUNCH_CS 0x10
#define GUEST_SEGMENT_LIMIT 0xffffffff
-#define HOST_SEGMENT_LIMIT 0xffffffff
struct host_execution_env {
/* selectors */
v->arch.schedule_tail = arch_vmx_do_resume;
}
-static int construct_vmcs(struct vcpu *v, cpu_user_regs_t *regs)
+static int construct_vmcs(struct vcpu *v)
{
int error = 0;
- unsigned long tmp, eflags;
+ unsigned long tmp;
union vmcs_arbytes arbytes;
+ vmx_vmcs_enter(v);
+
/* VMCS controls. */
error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
error |= __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
error |= __vmwrite(GUEST_ACTIVITY_STATE, 0);
- /* Guest selectors. */
- error |= __vmwrite(GUEST_ES_SELECTOR, GUEST_LAUNCH_DS);
- error |= __vmwrite(GUEST_SS_SELECTOR, GUEST_LAUNCH_DS);
- error |= __vmwrite(GUEST_DS_SELECTOR, GUEST_LAUNCH_DS);
- error |= __vmwrite(GUEST_FS_SELECTOR, GUEST_LAUNCH_DS);
- error |= __vmwrite(GUEST_GS_SELECTOR, GUEST_LAUNCH_DS);
- error |= __vmwrite(GUEST_CS_SELECTOR, GUEST_LAUNCH_CS);
-
/* Guest segment bases. */
error |= __vmwrite(GUEST_ES_BASE, 0);
error |= __vmwrite(GUEST_SS_BASE, 0);
arbytes.fields.seg_type = 0xb; /* 32-bit TSS (busy) */
error |= __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);
- error |= __vmwrite(GUEST_RSP, 0);
- error |= __vmwrite(GUEST_RIP, regs->eip);
-
- /* Guest EFLAGS. */
- eflags = regs->eflags & ~HVM_EFLAGS_RESERVED_0; /* clear 0s */
- eflags |= HVM_EFLAGS_RESERVED_1; /* set 1s */
- error |= __vmwrite(GUEST_RFLAGS, eflags);
-
error |= __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
__asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (tmp));
error |= __vmwrite(GUEST_DR7, tmp);
error |= __vmwrite(EXCEPTION_BITMAP,
MONITOR_DEFAULT_EXCEPTION_BITMAP);
- if ( regs->eflags & EF_TF )
- error |= __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
- else
- error |= __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
+ vmx_vmcs_exit(v);
return error;
}
{
if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
return -ENOMEM;
+
__vmx_clear_vmcs(v);
+
+ if ( construct_vmcs(v) != 0 )
+ {
+ vmx_free_vmcs(v->arch.hvm_vmx.vmcs);
+ v->arch.hvm_vmx.vmcs = NULL;
+ return -EINVAL;
+ }
+
return 0;
}
void arch_vmx_do_launch(struct vcpu *v)
{
-    cpu_user_regs_t *regs = &current->arch.guest_context.user_regs;
-
vmx_load_vmcs(v);
-
- if ( construct_vmcs(v, regs) < 0 )
- {
- if ( v->vcpu_id == 0 ) {
- printk("Failed to construct VMCS for BSP.\n");
- } else {
- printk("Failed to construct VMCS for AP %d.\n", v->vcpu_id);
- }
- domain_crash_synchronous();
- }
-
vmx_do_launch(v);
reset_stack_and_jump(vmx_asm_do_vmentry);
}
{
int rc;
+ spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);
+
v->arch.schedule_tail = arch_vmx_do_launch;
v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
v->arch.ctxt_switch_to = vmx_ctxt_switch_to;
return rc;
}
- spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);
-
return 0;
}
__vmwrite(GUEST_RSP, regs->esp);
- __vmwrite(GUEST_RFLAGS, regs->eflags);
+ /* NB. Bit 1 of RFLAGS must be set for VMENTRY to succeed. */
+ __vmwrite(GUEST_RFLAGS, regs->eflags | 2UL);
if (regs->eflags & EF_TF)
__vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
else
vmx_vmcs_exit(v);
}
-/* SMP VMX guest support */
-static void vmx_init_ap_context(struct vcpu_guest_context *ctxt,
- int vcpuid, int trampoline_vector)
+static void vmx_init_ap_context(
+ struct vcpu_guest_context *ctxt, int vcpuid, int trampoline_vector)
{
- int i;
-
memset(ctxt, 0, sizeof(*ctxt));
-
- /*
- * Initial register values:
- */
ctxt->user_regs.eip = VMXASSIST_BASE;
ctxt->user_regs.edx = vcpuid;
ctxt->user_regs.ebx = trampoline_vector;
-
- /* Virtual IDT is empty at start-of-day. */
- for ( i = 0; i < 256; i++ )
- {
- ctxt->trap_ctxt[i].vector = i;
- ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS;
- }
-
- /* No callback handlers. */
-#if defined(__i386__)
- ctxt->event_callback_cs = FLAT_KERNEL_CS;
- ctxt->failsafe_callback_cs = FLAT_KERNEL_CS;
-#endif
}
void do_nmi(struct cpu_user_regs *);
#define VMX_DELIVER_NO_ERROR_CODE -1
-/*
- * This works for both 32bit & 64bit eflags filteration
- * done in construct_init_vmc[sb]_guest()
- */
-#define HVM_EFLAGS_RESERVED_0 0xffc08028 /* bitmap for 0 */
-#define HVM_EFLAGS_RESERVED_1 0x00000002 /* bitmap for 1 */
-
#if HVM_DEBUG
#define DBG_LEVEL_0 (1 << 0)
#define DBG_LEVEL_1 (1 << 1)