{
if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared )
return X86EMUL_RETRY;
- hvm_inject_exception(TRAP_page_fault, pfec, addr);
+ hvm_inject_page_fault(pfec, addr);
return X86EMUL_EXCEPTION;
}
ASSERT(!reverse);
if ( npfn != INVALID_GFN )
return X86EMUL_UNHANDLEABLE;
- hvm_inject_exception(TRAP_page_fault, pfec, addr & PAGE_MASK);
+ hvm_inject_page_fault(pfec, addr & PAGE_MASK);
return X86EMUL_EXCEPTION;
}
*reps = done;
}
/* Inject pending hw/sw trap */
- if (v->arch.hvm_vcpu.inject_trap != -1)
+ if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
{
- hvm_inject_exception(v->arch.hvm_vcpu.inject_trap,
- v->arch.hvm_vcpu.inject_error_code,
- v->arch.hvm_vcpu.inject_cr2);
- v->arch.hvm_vcpu.inject_trap = -1;
+ hvm_inject_trap(&v->arch.hvm_vcpu.inject_trap);
+ v->arch.hvm_vcpu.inject_trap.vector = -1;
}
}
spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
- v->arch.hvm_vcpu.inject_trap = -1;
+ v->arch.hvm_vcpu.inject_trap.vector = -1;
#ifdef CONFIG_COMPAT
rc = setup_compat_arg_xlat(v);
domain_shutdown(v->domain, SHUTDOWN_reboot);
}
-void hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)
+void hvm_inject_trap(struct hvm_trap *trap)
{
struct vcpu *curr = current;
if ( nestedhvm_enabled(curr->domain) &&
!nestedhvm_vmswitch_in_progress(curr) &&
nestedhvm_vcpu_in_guestmode(curr) &&
- nhvm_vmcx_guest_intercepts_trap(curr, trapnr, errcode) )
+ nhvm_vmcx_guest_intercepts_trap(
+ curr, trap->vector, trap->error_code) )
{
enum nestedhvm_vmexits nsret;
- nsret = nhvm_vcpu_vmexit_trap(curr, trapnr, errcode, cr2);
+ nsret = nhvm_vcpu_vmexit_trap(curr, trap);
switch ( nsret )
{
}
}
- hvm_funcs.inject_exception(trapnr, errcode, cr2);
+ hvm_funcs.inject_trap(trap);
+}
+
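+/* Convenience wrapper: build a struct hvm_trap describing a hardware
+ * exception with the given vector and error code, and inject it into the
+ * current vcpu. */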
+void hvm_inject_hw_exception(unsigned int trapnr, int errcode)
+{
+ struct hvm_trap trap = {
+ .vector = trapnr,
+ .type = X86_EVENTTYPE_HW_EXCEPTION,
+ .error_code = errcode };
+ hvm_inject_trap(&trap);
+}
+
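+/* Inject #PF into the current vcpu: cr2 is the faulting linear address,
+ * which the per-arch injection hook latches into the guest's CR2. */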
+void hvm_inject_page_fault(int errcode, unsigned long cr2)
+{
+ struct hvm_trap trap = {
+ .vector = TRAP_page_fault,
+ .type = X86_EVENTTYPE_HW_EXCEPTION,
+ .error_code = errcode,
+ .cr2 = cr2 };
+ hvm_inject_trap(&trap);
}
int hvm_hap_nested_page_fault(unsigned long gpa,
return -1;
case NESTEDHVM_PAGEFAULT_MMIO:
if ( !handle_mmio() )
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return 1;
}
}
{
put_gfn(p2m->domain, gfn);
if ( !handle_mmio() )
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
rc = 1;
goto out;
}
{
gdprintk(XENLOG_WARNING,
"trying to write to read-only grant mapping\n");
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
rc = 1;
goto out_put_gfn;
}
return 0;
err:
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return -1;
}
{
gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
"EFER: 0x%"PRIx64"\n", value);
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
{
gdprintk(XENLOG_WARNING,
"Trying to change EFER.LME with paging enabled\n");
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
return X86EMUL_OKAY;
gpf:
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
return X86EMUL_OKAY;
gpf:
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
unmap_and_fail:
hvm_unmap_entry(pdesc);
fail:
- hvm_inject_exception(fault_type, sel & 0xfffc, 0);
+ hvm_inject_hw_exception(fault_type, sel & 0xfffc);
hvm_map_fail:
return 1;
}
if ( ((tss_sel & 0xfff8) + 7) > gdt.limit )
{
- hvm_inject_exception((taskswitch_reason == TSW_iret) ?
+ hvm_inject_hw_exception((taskswitch_reason == TSW_iret) ?
TRAP_invalid_tss : TRAP_gp_fault,
- tss_sel & 0xfff8, 0);
+ tss_sel & 0xfff8);
goto out;
}
if ( !tr.attr.fields.p )
{
- hvm_inject_exception(TRAP_no_segment, tss_sel & 0xfff8, 0);
+ hvm_inject_hw_exception(TRAP_no_segment, tss_sel & 0xfff8);
goto out;
}
if ( tr.attr.fields.type != ((taskswitch_reason == TSW_iret) ? 0xb : 0x9) )
{
- hvm_inject_exception(
+ hvm_inject_hw_exception(
(taskswitch_reason == TSW_iret) ? TRAP_invalid_tss : TRAP_gp_fault,
- tss_sel & 0xfff8, 0);
+ tss_sel & 0xfff8);
goto out;
}
if ( tr.limit < (sizeof(tss)-1) )
{
- hvm_inject_exception(TRAP_invalid_tss, tss_sel & 0xfff8, 0);
+ hvm_inject_hw_exception(TRAP_invalid_tss, tss_sel & 0xfff8);
goto out;
}
goto out;
if ( (tss.trace & 1) && !exn_raised )
- hvm_inject_exception(TRAP_debug, tss_sel & 0xfff8, 0);
+ hvm_inject_hw_exception(TRAP_debug, tss_sel & 0xfff8);
tr.attr.fields.type = 0xb; /* busy 32-bit tss */
hvm_set_segment_register(v, x86_seg_tr, &tr);
if ( pfec == PFEC_page_shared )
return HVMCOPY_gfn_shared;
if ( flags & HVMCOPY_fault )
- hvm_inject_exception(TRAP_page_fault, pfec, addr);
+ hvm_inject_page_fault(pfec, addr);
return HVMCOPY_bad_gva_to_gfn;
}
}
return ret;
gp_fault:
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
ret = X86EMUL_EXCEPTION;
*msr_content = -1ull;
goto out;
return ret;
gp_fault:
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
if ( tr.vcpuid >= d->max_vcpus || (v = d->vcpu[tr.vcpuid]) == NULL )
goto param_fail8;
- if ( v->arch.hvm_vcpu.inject_trap != -1 )
+ if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
rc = -EBUSY;
else
{
- v->arch.hvm_vcpu.inject_trap = tr.trap;
- v->arch.hvm_vcpu.inject_error_code = tr.error_code;
- v->arch.hvm_vcpu.inject_cr2 = tr.cr2;
+ v->arch.hvm_vcpu.inject_trap.vector = tr.trap;
+ v->arch.hvm_vcpu.inject_trap.error_code = tr.error_code;
+ v->arch.hvm_vcpu.inject_trap.cr2 = tr.cr2;
}
param_fail8:
return -EOPNOTSUPP;
}
-int
-nhvm_vcpu_vmexit_trap(struct vcpu *v, unsigned int trapnr,
- int errcode, unsigned long cr2)
+int nhvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap)
{
- return hvm_funcs.nhvm_vcpu_vmexit_trap(v, trapnr, errcode, cr2);
+ return hvm_funcs.nhvm_vcpu_vmexit_trap(v, trap);
}
uint64_t nhvm_vcpu_guestcr3(struct vcpu *v)
return 0;
case X86EMUL_EXCEPTION:
if ( ctxt.exn_pending )
- hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
+ hvm_inject_hw_exception(ctxt.exn_vector, ctxt.exn_error_code);
break;
default:
break;
/* Not OK: fetches from non-RAM pages are not supportable. */
gdprintk(XENLOG_WARNING, "Bad instruction fetch at %#lx (%#lx)\n",
(unsigned long) guest_cpu_user_regs()->eip, addr);
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return 0;
}
return 1;
gdprintk(XENLOG_WARNING,
"%s: Mismatch between expected and actual instruction bytes: "
"eip = %lx\n", __func__, (unsigned long)vmcb->rip);
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return 0;
done:
default:
gdprintk(XENLOG_ERR,
"nsvm_vcpu_vmentry failed, injecting #UD\n");
- hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
- /* Must happen after hvm_inject_exception or it doesn't work right. */
+ hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+ /* Must happen after hvm_inject_hw_exception or it doesn't work right. */
nv->nv_vmswitch_in_progress = 0;
return 1;
}
}
int
-nsvm_vcpu_vmexit_trap(struct vcpu *v, unsigned int trapnr,
- int errcode, unsigned long cr2)
+nsvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap)
{
ASSERT(vcpu_nestedhvm(v).nv_vvmcx != NULL);
- nestedsvm_vmexit_defer(v, VMEXIT_EXCEPTION_DE + trapnr, errcode, cr2);
+ nestedsvm_vmexit_defer(v, VMEXIT_EXCEPTION_DE + trap->vector,
+ trap->error_code, trap->cr2);
return NESTEDHVM_VMEXIT_DONE;
}
}
if ( nv->nv_vmexit_pending ) {
- /* hvm_inject_exception() must have run before.
+ /* hvm_inject_hw_exception() must have run before.
* exceptions have higher priority than interrupts.
*/
return hvm_intblk_rflags_ie;
unsigned int inst_len;
if ( !nestedhvm_enabled(v->domain) ) {
- hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+ hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
return;
}
vintr_t intr;
if ( !nestedhvm_enabled(v->domain) ) {
- hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+ hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
return;
}
curr->arch.hvm_svm.vmcb->interrupt_shadow = 0;
if ( regs->eflags & X86_EFLAGS_TF )
- hvm_inject_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE, 0);
+ hvm_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
}
static void svm_cpu_down(void)
passive_domain_destroy(v);
}
-static void svm_inject_exception(
- unsigned int trapnr, int errcode, unsigned long cr2)
+static void svm_inject_trap(struct hvm_trap *trap)
{
struct vcpu *curr = current;
struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
eventinj_t event = vmcb->eventinj;
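+ /* Take a local copy: the vector and error code may be rewritten below
+ * when merging with an already-pending hardware exception. */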
+ struct hvm_trap _trap = *trap;
- switch ( trapnr )
+ switch ( _trap.vector )
{
case TRAP_debug:
if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
__restore_debug_registers(curr);
vmcb_set_dr6(vmcb, vmcb_get_dr6(vmcb) | 0x4000);
}
+ /* fall through (monitor trap flag is VMX-only) */
case TRAP_int3:
if ( curr->domain->debugger_attached )
{
if ( unlikely(event.fields.v) &&
(event.fields.type == X86_EVENTTYPE_HW_EXCEPTION) )
{
- trapnr = hvm_combine_hw_exceptions(event.fields.vector, trapnr);
- if ( trapnr == TRAP_double_fault )
- errcode = 0;
+ _trap.vector = hvm_combine_hw_exceptions(
+ event.fields.vector, _trap.vector);
+ if ( _trap.vector == TRAP_double_fault )
+ _trap.error_code = 0;
}
event.bytes = 0;
event.fields.v = 1;
event.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
- event.fields.vector = trapnr;
- event.fields.ev = (errcode != HVM_DELIVER_NO_ERROR_CODE);
- event.fields.errorcode = errcode;
+ event.fields.vector = _trap.vector;
+ event.fields.ev = (_trap.error_code != HVM_DELIVER_NO_ERROR_CODE);
+ event.fields.errorcode = _trap.error_code;
vmcb->eventinj = event;
- if ( trapnr == TRAP_page_fault )
+ if ( _trap.vector == TRAP_page_fault )
{
- curr->arch.hvm_vcpu.guest_cr[2] = cr2;
- vmcb_set_cr2(vmcb, cr2);
- HVMTRACE_LONG_2D(PF_INJECT, errcode, TRC_PAR_LONG(cr2));
+ curr->arch.hvm_vcpu.guest_cr[2] = _trap.cr2;
+ vmcb_set_cr2(vmcb, _trap.cr2);
+ HVMTRACE_LONG_2D(PF_INJECT, _trap.error_code, TRC_PAR_LONG(_trap.cr2));
}
else
{
- HVMTRACE_2D(INJ_EXC, trapnr, errcode);
+ HVMTRACE_2D(INJ_EXC, _trap.vector, _trap.error_code);
}
}
{
/* Check if l1 guest must make FPU ready for the l2 guest */
if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS )
- hvm_inject_exception(TRAP_no_device, HVM_DELIVER_NO_ERROR_CODE, 0);
+ hvm_inject_hw_exception(TRAP_no_device, HVM_DELIVER_NO_ERROR_CODE);
else
vmcb_set_cr0(n1vmcb, vmcb_get_cr0(n1vmcb) & ~X86_CR0_TS);
return;
return X86EMUL_OKAY;
gpf:
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
return X86EMUL_OKAY;
gpf:
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
{
if (!nestedhvm_enabled(v->domain)) {
gdprintk(XENLOG_ERR, "VMRUN: nestedhvm disabled, injecting #UD\n");
- hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+ hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
return;
}
if (!nestedsvm_vmcb_map(v, vmcbaddr)) {
gdprintk(XENLOG_ERR, "VMRUN: mapping vmcb failed, injecting #UD\n");
- hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+ hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
return;
}
return;
inject:
- hvm_inject_exception(ret, HVM_DELIVER_NO_ERROR_CODE, 0);
+ hvm_inject_hw_exception(ret, HVM_DELIVER_NO_ERROR_CODE);
return;
}
return;
inject:
- hvm_inject_exception(ret, HVM_DELIVER_NO_ERROR_CODE, 0);
+ hvm_inject_hw_exception(ret, HVM_DELIVER_NO_ERROR_CODE);
return;
}
switch ( rc )
{
case X86EMUL_UNHANDLEABLE:
- hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+ hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
break;
case X86EMUL_EXCEPTION:
if ( ctxt.exn_pending )
- hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
+ hvm_inject_hw_exception(ctxt.exn_vector, ctxt.exn_error_code);
/* fall through */
default:
hvm_emulate_writeback(&ctxt);
.set_guest_pat = svm_set_guest_pat,
.get_guest_pat = svm_get_guest_pat,
.set_tsc_offset = svm_set_tsc_offset,
- .inject_exception = svm_inject_exception,
+ .inject_trap = svm_inject_trap,
.init_hypercall_page = svm_init_hypercall_page,
.event_pending = svm_event_pending,
.do_pmu_interrupt = svm_do_pmu_interrupt,
break;
}
- hvm_inject_exception(TRAP_page_fault, regs->error_code, va);
+ hvm_inject_page_fault(regs->error_code, va);
break;
}
__update_guest_eip(regs, vmcb->exitinfo2 - vmcb->rip);
}
else if ( !handle_mmio() )
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
break;
case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
if ( cpu_has_svm_decode && (vmcb->exitinfo1 & (1ULL << 63)) )
svm_vmexit_do_cr_access(vmcb, regs);
else if ( !handle_mmio() )
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
break;
case VMEXIT_INVLPG:
__update_guest_eip(regs, vmcb->nextrip - vmcb->rip);
}
else if ( !handle_mmio() )
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
break;
case VMEXIT_INVLPGA:
case VMEXIT_MONITOR:
case VMEXIT_MWAIT:
- hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+ hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
break;
case VMEXIT_VMRUN:
svm_vmexit_do_clgi(regs, v);
break;
case VMEXIT_SKINIT:
- hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+ hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
break;
case VMEXIT_XSETBV:
}
else if ( intack.source == hvm_intsrc_mce )
{
- vmx_inject_hw_exception(TRAP_machine_check, HVM_DELIVER_NO_ERROR_CODE);
+ hvm_inject_hw_exception(TRAP_machine_check, HVM_DELIVER_NO_ERROR_CODE);
}
else
{
uncanonical_address:
HVM_DBG_LOG(DBG_LEVEL_0, "Not cano address of msr write %x", msr);
- vmx_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return HNDL_exception_raised;
}
nvmx->intr.intr_info, nvmx->intr.error_code);
}
-static int nvmx_vmexit_exceptions(struct vcpu *v, unsigned int trapnr,
- int errcode, unsigned long cr2)
+static int nvmx_vmexit_trap(struct vcpu *v, struct hvm_trap *trap)
{
- nvmx_enqueue_n2_exceptions(v, trapnr, errcode);
+ nvmx_enqueue_n2_exceptions(v, trap->vector, trap->error_code);
return NESTEDHVM_VMEXIT_DONE;
}
curr->arch.hvm_vmx.vmx_emulate = 1;
}
-void vmx_inject_hw_exception(int trap, int error_code)
+void vmx_inject_extint(int trap)
+{
+ struct vcpu *v = current;
+ u32 pin_based_cntrl;
+
+ if ( nestedhvm_vcpu_in_guestmode(v) ) {
+ pin_based_cntrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
+ PIN_BASED_VM_EXEC_CONTROL);
+ if ( pin_based_cntrl & PIN_BASED_EXT_INTR_MASK ) {
+ nvmx_enqueue_n2_exceptions (v,
+ INTR_INFO_VALID_MASK | (X86_EVENTTYPE_EXT_INTR<<8) | trap,
+ HVM_DELIVER_NO_ERROR_CODE);
+ return;
+ }
+ }
+ __vmx_inject_exception(trap, X86_EVENTTYPE_EXT_INTR,
+ HVM_DELIVER_NO_ERROR_CODE);
+}
+
+void vmx_inject_nmi(void)
+{
+ struct vcpu *v = current;
+ u32 pin_based_cntrl;
+
+ if ( nestedhvm_vcpu_in_guestmode(v) ) {
+ pin_based_cntrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
+ PIN_BASED_VM_EXEC_CONTROL);
+ if ( pin_based_cntrl & PIN_BASED_NMI_EXITING ) {
+ nvmx_enqueue_n2_exceptions (v,
+ INTR_INFO_VALID_MASK | (X86_EVENTTYPE_NMI<<8) | TRAP_nmi,
+ HVM_DELIVER_NO_ERROR_CODE);
+ return;
+ }
+ }
+ __vmx_inject_exception(2, X86_EVENTTYPE_NMI,
+ HVM_DELIVER_NO_ERROR_CODE);
+}
+
+static void vmx_inject_trap(struct hvm_trap *trap)
{
unsigned long intr_info;
struct vcpu *curr = current;
+ struct hvm_trap _trap = *trap;
- int type = X86_EVENTTYPE_HW_EXCEPTION;
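+ /* For page faults, latch the faulting address into the guest's CR2
+ * before delivering the event. */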
+ if ( (_trap.vector == TRAP_page_fault) &&
+ (_trap.type == X86_EVENTTYPE_HW_EXCEPTION) )
+ curr->arch.hvm_vcpu.guest_cr[2] = _trap.cr2;
if ( nestedhvm_vcpu_in_guestmode(curr) )
intr_info = vcpu_2_nvmx(curr).intr.intr_info;
else
intr_info = __vmread(VM_ENTRY_INTR_INFO);
- switch ( trap )
+ switch ( _trap.vector )
{
case TRAP_debug:
- type = X86_EVENTTYPE_SW_EXCEPTION;
if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
{
__restore_debug_registers(curr);
if ( cpu_has_monitor_trap_flag )
break;
/* fall through */
-
case TRAP_int3:
if ( curr->domain->debugger_attached )
{
domain_pause_for_debugger();
return;
}
-
- type = X86_EVENTTYPE_SW_EXCEPTION;
- __vmwrite(VM_ENTRY_INSTRUCTION_LEN, 1); /* int3 */
- break;
-
- default:
- if ( trap > TRAP_last_reserved )
- {
- type = X86_EVENTTYPE_SW_EXCEPTION;
- __vmwrite(VM_ENTRY_INSTRUCTION_LEN, 2); /* int imm8 */
- }
- break;
}
if ( unlikely(intr_info & INTR_INFO_VALID_MASK) &&
(((intr_info >> 8) & 7) == X86_EVENTTYPE_HW_EXCEPTION) )
{
- trap = hvm_combine_hw_exceptions((uint8_t)intr_info, trap);
- if ( trap == TRAP_double_fault )
- error_code = 0;
+ _trap.vector = hvm_combine_hw_exceptions(
+ (uint8_t)intr_info, _trap.vector);
+ if ( _trap.vector == TRAP_double_fault )
+ _trap.error_code = 0;
}
if ( nestedhvm_vcpu_in_guestmode(curr) &&
- nvmx_intercepts_exception(curr, trap, error_code) )
+ nvmx_intercepts_exception(curr, _trap.vector, _trap.error_code) )
{
nvmx_enqueue_n2_exceptions (curr,
- INTR_INFO_VALID_MASK | (type<<8) | trap,
- error_code);
+ INTR_INFO_VALID_MASK | (_trap.type<<8) | _trap.vector,
+ _trap.error_code);
return;
}
else
- __vmx_inject_exception(trap, type, error_code);
+ __vmx_inject_exception(_trap.vector, _trap.type, _trap.error_code);
- if ( trap == TRAP_page_fault )
- HVMTRACE_LONG_2D(PF_INJECT, error_code,
+ if ( (_trap.vector == TRAP_page_fault) &&
+ (_trap.type == X86_EVENTTYPE_HW_EXCEPTION) )
+ HVMTRACE_LONG_2D(PF_INJECT, _trap.error_code,
TRC_PAR_LONG(current->arch.hvm_vcpu.guest_cr[2]));
else
- HVMTRACE_2D(INJ_EXC, trap, error_code);
-}
-
-void vmx_inject_extint(int trap)
-{
- struct vcpu *v = current;
- u32 pin_based_cntrl;
-
- if ( nestedhvm_vcpu_in_guestmode(v) ) {
- pin_based_cntrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
- PIN_BASED_VM_EXEC_CONTROL);
- if ( pin_based_cntrl & PIN_BASED_EXT_INTR_MASK ) {
- nvmx_enqueue_n2_exceptions (v,
- INTR_INFO_VALID_MASK | (X86_EVENTTYPE_EXT_INTR<<8) | trap,
- HVM_DELIVER_NO_ERROR_CODE);
- return;
- }
- }
- __vmx_inject_exception(trap, X86_EVENTTYPE_EXT_INTR,
- HVM_DELIVER_NO_ERROR_CODE);
-}
-
-void vmx_inject_nmi(void)
-{
- struct vcpu *v = current;
- u32 pin_based_cntrl;
-
- if ( nestedhvm_vcpu_in_guestmode(v) ) {
- pin_based_cntrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
- PIN_BASED_VM_EXEC_CONTROL);
- if ( pin_based_cntrl & PIN_BASED_NMI_EXITING ) {
- nvmx_enqueue_n2_exceptions (v,
- INTR_INFO_VALID_MASK | (X86_EVENTTYPE_NMI<<8) | TRAP_nmi,
- HVM_DELIVER_NO_ERROR_CODE);
- return;
- }
- }
- __vmx_inject_exception(2, X86_EVENTTYPE_NMI,
- HVM_DELIVER_NO_ERROR_CODE);
-}
-
-static void vmx_inject_exception(
- unsigned int trapnr, int errcode, unsigned long cr2)
-{
- if ( trapnr == TRAP_page_fault )
- current->arch.hvm_vcpu.guest_cr[2] = cr2;
-
- vmx_inject_hw_exception(trapnr, errcode);
+ HVMTRACE_2D(INJ_EXC, _trap.vector, _trap.error_code);
}
static int vmx_event_pending(struct vcpu *v)
.set_guest_pat = vmx_set_guest_pat,
.get_guest_pat = vmx_get_guest_pat,
.set_tsc_offset = vmx_set_tsc_offset,
- .inject_exception = vmx_inject_exception,
+ .inject_trap = vmx_inject_trap,
.init_hypercall_page = vmx_init_hypercall_page,
.event_pending = vmx_event_pending,
.do_pmu_interrupt = vmx_do_pmu_interrupt,
.nhvm_vcpu_hostcr3 = nvmx_vcpu_hostcr3,
.nhvm_vcpu_asid = nvmx_vcpu_asid,
.nhvm_vmcx_guest_intercepts_trap = nvmx_intercepts_exception,
- .nhvm_vcpu_vmexit_trap = nvmx_vmexit_exceptions,
+ .nhvm_vcpu_vmexit_trap = nvmx_vmexit_trap,
.nhvm_intr_blocked = nvmx_intr_blocked
};
}
if ( regs->eflags & X86_EFLAGS_TF )
- vmx_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
+ hvm_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
}
static void vmx_fpu_dirty_intercept(void)
return X86EMUL_OKAY;
gp_fault:
- vmx_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
if ( (rc < 0) ||
(vmx_add_host_load_msr(msr) < 0) )
- vmx_inject_hw_exception(TRAP_machine_check, 0);
+ hvm_inject_hw_exception(TRAP_machine_check, 0);
else
{
__vmwrite(GUEST_IA32_DEBUGCTL, msr_content);
return X86EMUL_OKAY;
gp_fault:
- vmx_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
switch ( rc )
{
case X86EMUL_UNHANDLEABLE:
- vmx_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+ hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
break;
case X86EMUL_EXCEPTION:
if ( ctxt.exn_pending )
- hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
+ hvm_inject_hw_exception(ctxt.exn_vector, ctxt.exn_error_code);
/* fall through */
default:
hvm_emulate_writeback(&ctxt);
if ( handled < 0 )
{
- vmx_inject_exception(TRAP_int3, HVM_DELIVER_NO_ERROR_CODE, 0);
+ struct hvm_trap trap = {
+ .vector = TRAP_int3,
+ .type = X86_EVENTTYPE_SW_EXCEPTION,
+ .error_code = HVM_DELIVER_NO_ERROR_CODE
+ };
+ hvm_inject_trap(&trap);
break;
}
else if ( handled )
break;
}
- v->arch.hvm_vcpu.guest_cr[2] = exit_qualification;
- vmx_inject_hw_exception(TRAP_page_fault, regs->error_code);
+ hvm_inject_page_fault(regs->error_code, exit_qualification);
break;
case TRAP_nmi:
if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) !=
* as far as vmexit.
*/
WARN_ON(exit_reason == EXIT_REASON_GETSEC);
- vmx_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+ hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
break;
case EXIT_REASON_TPR_BELOW_THRESHOLD:
case EXIT_REASON_APIC_ACCESS:
if ( !vmx_handle_eoi_write() && !handle_mmio() )
- vmx_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
break;
case EXIT_REASON_IO_INSTRUCTION:
{
/* INS, OUTS */
if ( !handle_mmio() )
- vmx_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
}
else
{
if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
return 1;
gdprintk(XENLOG_WARNING, "Debug Store is not supported on this cpu\n");
- vmx_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return 0;
}
}
case MSR_CORE_PERF_GLOBAL_STATUS:
gdprintk(XENLOG_INFO, "Can not write readonly MSR: "
"MSR_PERF_GLOBAL_STATUS(0x38E)!\n");
- vmx_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return 1;
case MSR_IA32_PEBS_ENABLE:
if ( msr_content & 1 )
gdprintk(XENLOG_WARNING,
"Illegal address for IA32_DS_AREA: %#" PRIx64 "x\n",
msr_content);
- vmx_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return 1;
}
core2_vpmu_cxt->pmu_enable->ds_area_enable = msr_content ? 1 : 0;
break;
}
if (inject_gp)
- vmx_inject_hw_exception(TRAP_gp_fault, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
else
wrmsrl(msr, msr_content);
}
invalid_op:
gdprintk(XENLOG_ERR, "vmx_inst_check_privilege: invalid_op\n");
- hvm_inject_exception(TRAP_invalid_op, 0, 0);
+ hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
return X86EMUL_EXCEPTION;
gp_fault:
gdprintk(XENLOG_ERR, "vmx_inst_check_privilege: gp_fault\n");
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
return X86EMUL_OKAY;
gp_fault:
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
if ( !okay )
{
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ hvm_inject_hw_exception(TRAP_gp_fault, 0);
return X86EMUL_EXCEPTION;
}
if ( gfn == INVALID_GFN )
{
if ( is_hvm_vcpu(v) )
- hvm_inject_exception(TRAP_page_fault, pfec, vaddr);
+ hvm_inject_page_fault(pfec, vaddr);
else
propagate_page_fault(vaddr, pfec);
return _mfn(BAD_GVA_TO_GFN);
#define HVM_HAP_SUPERPAGE_2MB 0x00000001
#define HVM_HAP_SUPERPAGE_1GB 0x00000002
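+/*
+ * Describes a trap/exception to be injected into a guest: the vector,
+ * the X86_EVENTTYPE_* event type, an optional error code and, for page
+ * faults, the faulting linear address (cr2).
+ */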
+struct hvm_trap {
+ int vector;
+ unsigned int type; /* X86_EVENTTYPE_* */
+ int error_code; /* HVM_DELIVER_NO_ERROR_CODE if n/a */
+ unsigned long cr2; /* Only for TRAP_page_fault h/w exception */
+};
+
/*
* The hardware virtual machine (HVM) interface abstracts away from the
* x86/x86_64 CPU virtualization assist specifics. Currently this interface
void (*set_tsc_offset)(struct vcpu *v, u64 offset);
- void (*inject_exception)(unsigned int trapnr, int errcode,
- unsigned long cr2);
+ void (*inject_trap)(struct hvm_trap *trap);
void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
struct cpu_user_regs *regs);
int (*nhvm_vcpu_vmexit)(struct vcpu *v, struct cpu_user_regs *regs,
uint64_t exitcode);
- int (*nhvm_vcpu_vmexit_trap)(struct vcpu *v,
- unsigned int trapnr,
- int errcode,
- unsigned long cr2);
+ int (*nhvm_vcpu_vmexit_trap)(struct vcpu *v, struct hvm_trap *trap);
uint64_t (*nhvm_vcpu_guestcr3)(struct vcpu *v);
uint64_t (*nhvm_vcpu_hostcr3)(struct vcpu *v);
uint32_t (*nhvm_vcpu_asid)(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);
void hvm_migrate_pirqs(struct vcpu *v);
-void hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2);
+void hvm_inject_trap(struct hvm_trap *trap);
+void hvm_inject_hw_exception(unsigned int trapnr, int errcode);
+void hvm_inject_page_fault(int errcode, unsigned long cr2);
static inline int hvm_event_pending(struct vcpu *v)
{
- /* inject vmexit into l1 guest. l1 guest will see a VMEXIT due to
- * 'trapnr' exception.
- */
+ /* Inject a vmexit into the l1 guest. The l1 guest will see a VMEXIT due to
+ * the exception described by 'trap'.
+ */
-int nhvm_vcpu_vmexit_trap(struct vcpu *v,
- unsigned int trapnr, int errcode, unsigned long cr2);
+int nhvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap);
/* returns l2 guest cr3 in l2 guest physical address space. */
uint64_t nhvm_vcpu_guestcr3(struct vcpu *v);
int nsvm_vcpu_vmrun(struct vcpu *v, struct cpu_user_regs *regs);
int nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs,
uint64_t exitcode);
-int nsvm_vcpu_vmexit_trap(struct vcpu *v, unsigned int trapnr,
- int errcode, unsigned long cr2);
+int nsvm_vcpu_vmexit_trap(struct vcpu *v, struct hvm_trap *trap);
uint64_t nsvm_vcpu_guestcr3(struct vcpu *v);
uint64_t nsvm_vcpu_hostcr3(struct vcpu *v);
uint32_t nsvm_vcpu_asid(struct vcpu *v);
/* Callback into x86_emulate when emulating FPU/MMX/XMM instructions. */
void (*fpu_exception_callback)(void *, struct cpu_user_regs *);
void *fpu_exception_callback_arg;
- /* Pending hw/sw interrupt */
- int inject_trap; /* -1 for nothing to inject */
- int inject_error_code;
- unsigned long inject_cr2;
+
+ /* Pending hw/sw interrupt (.vector = -1 means nothing pending). */
+ struct hvm_trap inject_trap;
struct viridian_vcpu viridian;
};
return rc;
}
-void vmx_inject_hw_exception(int trap, int error_code);
void vmx_inject_extint(int trap);
void vmx_inject_nmi(void);