return rc;
}
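+/*
+ * Emulator callback for VMFUNC: defer to the vendor-specific altp2m
+ * hook; inject #UD into the guest if emulation fails.
+ */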
+static int hvmemul_vmfunc(
+ struct x86_emulate_ctxt *ctxt)
+{
+ int rc;
+
+ rc = hvm_funcs.altp2m_vcpu_emulate_vmfunc(ctxt->regs);
+ if ( rc != X86EMUL_OKAY )
+ hvmemul_inject_hw_exception(TRAP_invalid_op, 0, ctxt);
+
+ return rc;
+}
+
static const struct x86_emulate_ops hvm_emulate_ops = {
.read = hvmemul_read,
.insn_fetch = hvmemul_insn_fetch,
.inject_sw_interrupt = hvmemul_inject_sw_interrupt,
.get_fpu = hvmemul_get_fpu,
.put_fpu = hvmemul_put_fpu,
- .invlpg = hvmemul_invlpg
+ .invlpg = hvmemul_invlpg,
+ .vmfunc = hvmemul_vmfunc,
};
static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
.inject_sw_interrupt = hvmemul_inject_sw_interrupt,
.get_fpu = hvmemul_get_fpu,
.put_fpu = hvmemul_put_fpu,
- .invlpg = hvmemul_invlpg
+ .invlpg = hvmemul_invlpg,
+ .vmfunc = hvmemul_vmfunc,
};
static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content);
static void vmx_invlpg_intercept(unsigned long vaddr);
+static int vmx_vmfunc_intercept(struct cpu_user_regs *regs);
uint8_t __read_mostly posted_intr_vector;
vmx_vmcs_exit(v);
}
+static int vmx_vcpu_emulate_vmfunc(struct cpu_user_regs *regs)
+{
+ int rc = X86EMUL_EXCEPTION;
+ struct vcpu *curr = current;
+
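+ /*
+ * Only emulate when hardware lacks VMFUNC, altp2m is active, and the
+ * guest requested leaf 0 (EPTP switching) with a valid view index in ECX.
+ */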
+ if ( !cpu_has_vmx_vmfunc && altp2m_active(curr->domain) &&
+ regs->_eax == 0 &&
+ p2m_switch_vcpu_altp2m_by_id(curr, regs->_ecx) )
+ rc = X86EMUL_OKAY;
+
+ return rc;
+}
+
static bool_t vmx_vcpu_emulate_ve(struct vcpu *v)
{
bool_t rc = 0;
.msr_read_intercept = vmx_msr_read_intercept,
.msr_write_intercept = vmx_msr_write_intercept,
.invlpg_intercept = vmx_invlpg_intercept,
+ .vmfunc_intercept = vmx_vmfunc_intercept,
.handle_cd = vmx_handle_cd,
.set_info_guest = vmx_set_info_guest,
.set_rdtsc_exiting = vmx_set_rdtsc_exiting,
.altp2m_vcpu_update_p2m = vmx_vcpu_update_eptp,
.altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
.altp2m_vcpu_emulate_ve = vmx_vcpu_emulate_ve,
+ .altp2m_vcpu_emulate_vmfunc = vmx_vcpu_emulate_vmfunc,
};
const struct hvm_function_table * __init start_vmx(void)
vpid_sync_vcpu_gva(curr, vaddr);
}
+static int vmx_vmfunc_intercept(struct cpu_user_regs *regs)
+{
+ /*
+ * This handler is a placeholder for a future where Xen may
+ * want to handle VMFUNC exits and resume a domain normally without
+ * injecting a #UD to the guest - for example, in a VT-nested
+ * scenario where Xen may want to lazily shadow the alternate
+ * EPTP list.
+ */
+ gdprintk(XENLOG_ERR, "Failed guest VMFUNC execution\n");
+ return X86EMUL_EXCEPTION;
+}
+
static int vmx_cr_access(unsigned long exit_qualification)
{
struct vcpu *curr = current;
update_guest_eip();
break;
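+ /*
+ * vmx_vmfunc_intercept() currently always fails, so a VMFUNC exit
+ * results in #UD being injected into the guest.
+ */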
+ case EXIT_REASON_VMFUNC:
+ if ( vmx_vmfunc_intercept(regs) != X86EMUL_OKAY )
+ hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+ else
+ update_guest_eip();
+ break;
+
case EXIT_REASON_MWAIT_INSTRUCTION:
case EXIT_REASON_MONITOR_INSTRUCTION:
case EXIT_REASON_GETSEC:
break;
}
+ no_writeback:
/* Inject #DB if single-step tracing was enabled at instruction start. */
if ( (ctxt->regs->eflags & EFLG_TF) && (rc == X86EMUL_OKAY) &&
(ops->inject_hw_exception != NULL) )
struct segment_register reg;
unsigned long base, limit, cr0, cr0w;
- if ( modrm == 0xdf ) /* invlpga */
+ switch ( modrm )
{
+ case 0xdf: /* invlpga */
generate_exception_if(!in_protmode(ctxt, ops), EXC_UD, -1);
generate_exception_if(!mode_ring0(), EXC_GP, 0);
fail_if(ops->invlpg == NULL);
if ( (rc = ops->invlpg(x86_seg_none, truncate_ea(_regs.eax),
ctxt)) )
goto done;
- break;
- }
-
- if ( modrm == 0xf9 ) /* rdtscp */
- {
+ goto no_writeback;
+ case 0xf9: /* rdtscp */ {
uint64_t tsc_aux;
fail_if(ops->read_msr == NULL);
if ( (rc = ops->read_msr(MSR_TSC_AUX, &tsc_aux, ctxt)) != 0 )
_regs.ecx = (uint32_t)tsc_aux;
goto rdtsc;
}
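+ /* LOCK, REP/REPNE and 66h prefixes on VMFUNC all raise #UD. */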
+ case 0xd4: /* vmfunc */
+ generate_exception_if(lock_prefix | rep_prefix() | (vex.pfx == vex_66),
+ EXC_UD, -1);
+ fail_if(ops->vmfunc == NULL);
+ if ( (rc = ops->vmfunc(ctxt)) != X86EMUL_OKAY )
+ goto done;
+ goto no_writeback;
+ }
switch ( modrm_reg & 7 )
{
enum x86_segment seg,
unsigned long offset,
struct x86_emulate_ctxt *ctxt);
+
+ /* vmfunc: Emulate VMFUNC with the given EAX/ECX inputs. */
+ int (*vmfunc)(
+ struct x86_emulate_ctxt *ctxt);
};
struct cpu_user_regs;
int (*msr_read_intercept)(unsigned int msr, uint64_t *msr_content);
int (*msr_write_intercept)(unsigned int msr, uint64_t msr_content);
void (*invlpg_intercept)(unsigned long vaddr);
+ int (*vmfunc_intercept)(struct cpu_user_regs *regs);
void (*handle_cd)(struct vcpu *v, unsigned long value);
void (*set_info_guest)(struct vcpu *v);
void (*set_rdtsc_exiting)(struct vcpu *v, bool_t);
void (*altp2m_vcpu_update_p2m)(struct vcpu *v);
void (*altp2m_vcpu_update_vmfunc_ve)(struct vcpu *v);
bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
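+ /* Emulate VMFUNC in software for hardware lacking the feature. */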
+ int (*altp2m_vcpu_emulate_vmfunc)(struct cpu_user_regs *regs);
};
extern struct hvm_function_table hvm_funcs;