return hvmemul_write(seg, offset, p_new, bytes, ctxt);
}
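+/*
+ * Bridge to the emulator's validate hook: an absent per-context hook
+ * means every decoded instruction is acceptable.
+ */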
+static int hvmemul_validate(
+ const struct x86_emulate_state *state,
+ struct x86_emulate_ctxt *ctxt)
+{
+ const struct hvm_emulate_ctxt *hvmemul_ctxt =
+ container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+
+ return !hvmemul_ctxt->validate || hvmemul_ctxt->validate(state, ctxt)
+ ? X86EMUL_OKAY : X86EMUL_UNHANDLEABLE;
+}
+
static int hvmemul_rep_ins(
uint16_t src_port,
enum x86_segment dst_seg,
.insn_fetch = hvmemul_insn_fetch,
.write = hvmemul_write,
.cmpxchg = hvmemul_cmpxchg,
+ .validate = hvmemul_validate,
.rep_ins = hvmemul_rep_ins,
.rep_outs = hvmemul_rep_outs,
.rep_movs = hvmemul_rep_movs,
else
ops = &hvm_ro_emulate_ops_mmio;
- hvm_emulate_init_once(&ctxt, guest_cpu_user_regs());
+ hvm_emulate_init_once(&ctxt, x86_insn_is_mem_write,
+ guest_cpu_user_regs());
ctxt.ctxt.data = &mmio_ro_ctxt;
rc = _hvm_emulate_one(&ctxt, ops);
switch ( rc )
struct hvm_emulate_ctxt ctx = {{ 0 }};
int rc;
- hvm_emulate_init_once(&ctx, guest_cpu_user_regs());
+ hvm_emulate_init_once(&ctx, NULL, guest_cpu_user_regs());
switch ( kind )
{
void hvm_emulate_init_once(
struct hvm_emulate_ctxt *hvmemul_ctxt,
+ hvm_emulate_validate_t *validate,
struct cpu_user_regs *regs)
{
struct vcpu *curr = current;
hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
+ hvmemul_ctxt->validate = validate;
hvmemul_ctxt->ctxt.regs = regs;
hvmemul_ctxt->ctxt.vendor = curr->domain->arch.x86_vendor;
hvmemul_ctxt->ctxt.force_writeback = true;
return X86EMUL_EXCEPTION;
}
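+/*
+ * With the forced emulation prefix disabled, #UD emulation is restricted
+ * to the instructions whose behaviour differs between CPU vendors.
+ */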
+static bool is_cross_vendor(const struct x86_emulate_state *state,
+ const struct x86_emulate_ctxt *ctxt)
+{
+ switch ( ctxt->opcode )
+ {
+ case X86EMUL_OPC(0x0f, 0x05): /* syscall */
+ case X86EMUL_OPC(0x0f, 0x34): /* sysenter */
+ case X86EMUL_OPC(0x0f, 0x35): /* sysexit */
+ return true;
+ }
+
+ return false;
+}
+
void hvm_ud_intercept(struct cpu_user_regs *regs)
{
struct vcpu *cur = current;
cur->domain->arch.x86_vendor != boot_cpu_data.x86_vendor;
struct hvm_emulate_ctxt ctxt;
- hvm_emulate_init_once(&ctxt, regs);
+ hvm_emulate_init_once(&ctxt, opt_hvm_fep ? NULL : is_cross_vendor, regs);
if ( opt_hvm_fep )
{
gprintk(XENLOG_ERR, "Unsuccessful map-cache invalidate\n");
}
-bool handle_mmio(void)
+bool hvm_emulate_one_insn(hvm_emulate_validate_t *validate)
{
struct hvm_emulate_ctxt ctxt;
struct vcpu *curr = current;
ASSERT(!is_pvh_vcpu(curr));
- hvm_emulate_init_once(&ctxt, guest_cpu_user_regs());
+ hvm_emulate_init_once(&ctxt, validate, guest_cpu_user_regs());
rc = hvm_emulate_one(&ctxt);
{
struct hvm_emulate_ctxt ctxt;
- hvm_emulate_init_once(&ctxt, guest_cpu_user_regs());
+ hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
vmx_realmode_emulate_one(&ctxt);
hvm_emulate_writeback(&ctxt);
#endif
ASSERT(v == current);
- hvm_emulate_init_once(&ctxt, guest_cpu_user_regs());
+ hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
hvm_emulate_init_per_insn(&ctxt, NULL, 0);
state = x86_decode_insn(&ctxt.ctxt, hvmemul_insn_fetch);
if ( IS_ERR_OR_NULL(state) )
paging_invlpg(current, vaddr);
}
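+/* Match INVLPG (0F 01 /7 with a memory operand) on the INVLPG intercept path. */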
+static bool is_invlpg(const struct x86_emulate_state *state,
+ const struct x86_emulate_ctxt *ctxt)
+{
+ unsigned int ext;
+
+ return ctxt->opcode == X86EMUL_OPC(0x0f, 0x01) &&
+ x86_insn_modrm(state, NULL, &ext) != 3 &&
+ (ext & 7) == 7;
+}
+
static void svm_invlpg(struct vcpu *v, unsigned long vaddr)
{
svm_asid_g_invlpg(v, vaddr);
if ( handle_pio(port, bytes, dir) )
__update_guest_eip(regs, vmcb->exitinfo2 - vmcb->rip);
}
- else if ( !handle_mmio() )
+ else if ( !hvm_emulate_one_insn(x86_insn_is_portio) )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
break;
case VMEXIT_CR0_WRITE ... VMEXIT_CR15_WRITE:
if ( cpu_has_svm_decode && (vmcb->exitinfo1 & (1ULL << 63)) )
svm_vmexit_do_cr_access(vmcb, regs);
- else if ( !handle_mmio() )
+ else if ( !hvm_emulate_one_insn(x86_insn_is_cr_access) )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
break;
svm_invlpg_intercept(vmcb->exitinfo1);
__update_guest_eip(regs, vmcb->nextrip - vmcb->rip);
}
- else if ( !handle_mmio() )
+ else if ( !hvm_emulate_one_insn(is_invlpg) )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
break;
if ( intr_info & INTR_INFO_VALID_MASK )
__vmwrite(VM_ENTRY_INTR_INFO, 0);
- hvm_emulate_init_once(&hvmemul_ctxt, regs);
+ hvm_emulate_init_once(&hvmemul_ctxt, NULL, regs);
/* Only deliver interrupts into emulated real mode. */
if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
{
/* INS, OUTS */
if ( unlikely(is_pvh_vcpu(v)) /* PVH fixme */ ||
- !handle_mmio() )
+ !hvm_emulate_one_insn(x86_insn_is_portio) )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
}
else
emulate_fpu_insn_memsrc("flds", src.val);
dst.type = OP_NONE;
break;
- case 2: /* fstp m32fp */
+ case 2: /* fst m32fp */
emulate_fpu_insn_memdst("fsts", dst.val);
dst.bytes = 4;
break;
return state->ea.mem.off;
}
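+/*
+ * Does the decoded instruction access memory?  LEA and INVLPG encode a
+ * memory operand without actually accessing it, while the string insns,
+ * XLAT and CLZERO access memory without a ModRM-encoded memory operand.
+ */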
+bool
+x86_insn_is_mem_access(const struct x86_emulate_state *state,
+ const struct x86_emulate_ctxt *ctxt)
+{
+ if ( state->ea.type == OP_MEM )
+ return ctxt->opcode != 0x8d /* LEA */ &&
+ (ctxt->opcode != X86EMUL_OPC(0x0f, 0x01) ||
+ (state->modrm_reg & 7) != 7) /* INVLPG */;
+
+ switch ( ctxt->opcode )
+ {
+ case 0x6c ... 0x6f: /* INS / OUTS */
+ case 0xa4 ... 0xa7: /* MOVS / CMPS */
+ case 0xaa ... 0xaf: /* STOS / LODS / SCAS */
+ case 0xd7: /* XLAT */
+ return true;
+
+ case X86EMUL_OPC(0x0f, 0x01):
+ /* Cover CLZERO. */
+ return (state->modrm_rm & 7) == 4 && (state->modrm_reg & 7) == 7;
+ }
+
+ return false;
+}
+
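+/*
+ * Does the decoded instruction write to memory?  The generic DstMem case
+ * covers most instructions; implicit destinations (string stores, SIMD and
+ * FPU stores, CLZERO) and the bit-test group need enumerating explicitly.
+ */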
+bool
+x86_insn_is_mem_write(const struct x86_emulate_state *state,
+ const struct x86_emulate_ctxt *ctxt)
+{
+ switch ( state->desc & DstMask )
+ {
+ case DstMem:
+ return state->modrm_mod != 3;
+
+ case DstBitBase:
+ case DstImplicit:
+ break;
+
+ default:
+ return false;
+ }
+
+ if ( state->modrm_mod == 3 )
+ /* CLZERO is the odd one. */
+ return ctxt->opcode == X86EMUL_OPC(0x0f, 0x01) &&
+ (state->modrm_rm & 7) == 4 && (state->modrm_reg & 7) == 7;
+
+ switch ( ctxt->opcode )
+ {
+ case 0x6c: case 0x6d: /* INS */
+ case 0xa4: case 0xa5: /* MOVS */
+ case 0xaa: case 0xab: /* STOS */
+ case X86EMUL_OPC(0x0f, 0x11): /* MOVUPS */
+ case X86EMUL_OPC_VEX(0x0f, 0x11): /* VMOVUPS */
+ case X86EMUL_OPC_66(0x0f, 0x11): /* MOVUPD */
+ case X86EMUL_OPC_VEX_66(0x0f, 0x11): /* VMOVUPD */
+ case X86EMUL_OPC_F3(0x0f, 0x11): /* MOVSS */
+ case X86EMUL_OPC_VEX_F3(0x0f, 0x11): /* VMOVSS */
+ case X86EMUL_OPC_F2(0x0f, 0x11): /* MOVSD */
+ case X86EMUL_OPC_VEX_F2(0x0f, 0x11): /* VMOVSD */
+ case X86EMUL_OPC(0x0f, 0x29): /* MOVAPS */
+ case X86EMUL_OPC_VEX(0x0f, 0x29): /* VMOVAPS */
+ case X86EMUL_OPC_66(0x0f, 0x29): /* MOVAPD */
+ case X86EMUL_OPC_VEX_66(0x0f, 0x29): /* VMOVAPD */
+ case X86EMUL_OPC(0x0f, 0x2b): /* MOVNTPS */
+ case X86EMUL_OPC_VEX(0x0f, 0x2b): /* VMOVNTPS */
+ case X86EMUL_OPC_66(0x0f, 0x2b): /* MOVNTPD */
+ case X86EMUL_OPC_VEX_66(0x0f, 0x2b): /* VMOVNTPD */
+ case X86EMUL_OPC(0x0f, 0x7e): /* MOVD/MOVQ */
+ case X86EMUL_OPC_66(0x0f, 0x7e): /* MOVD/MOVQ */
+ case X86EMUL_OPC_VEX_66(0x0f, 0x7e): /* VMOVD/VMOVQ */
+ case X86EMUL_OPC(0x0f, 0x7f): /* MOVQ */
+ case X86EMUL_OPC_66(0x0f, 0x7f): /* MOVDQA */
+ case X86EMUL_OPC_VEX_66(0x0f, 0x7f): /* VMOVDQA */
+ case X86EMUL_OPC_F3(0x0f, 0x7f): /* MOVDQU */
+ case X86EMUL_OPC_VEX_F3(0x0f, 0x7f): /* VMOVDQU */
+ case X86EMUL_OPC(0x0f, 0xab): /* BTS */
+ case X86EMUL_OPC(0x0f, 0xb3): /* BTR */
+ case X86EMUL_OPC(0x0f, 0xbb): /* BTC */
+ case X86EMUL_OPC_66(0x0f, 0xd6): /* MOVQ */
+ case X86EMUL_OPC_VEX_66(0x0f, 0xd6): /* VMOVQ */
+ case X86EMUL_OPC(0x0f, 0xe7): /* MOVNTQ */
+ case X86EMUL_OPC_66(0x0f, 0xe7): /* MOVNTDQ */
+ case X86EMUL_OPC_VEX_66(0x0f, 0xe7): /* VMOVNTDQ */
+ return true;
+
+ case 0xd9:
+ switch ( state->modrm_reg & 7 )
+ {
+ case 2: /* FST m32fp */
+ case 3: /* FSTP m32fp */
+ case 6: /* FNSTENV */
+ case 7: /* FNSTCW */
+ return true;
+ }
+ break;
+
+ case 0xdb:
+ switch ( state->modrm_reg & 7 )
+ {
+ case 1: /* FISTTP m32i */
+ case 2: /* FIST m32i */
+ case 3: /* FISTP m32i */
+ case 7: /* FSTP m80fp */
+ return true;
+ }
+ break;
+
+ case 0xdd:
+ switch ( state->modrm_reg & 7 )
+ {
+ case 1: /* FISTTP m64i */
+ case 2: /* FST m64fp */
+ case 3: /* FSTP m64fp */
+ case 6: /* FNSAVE */
+ case 7: /* FNSTSW */
+ return true;
+ }
+ break;
+
+ case 0xdf:
+ switch ( state->modrm_reg & 7 )
+ {
+ case 1: /* FISTTP m16i */
+ case 2: /* FIST m16i */
+ case 3: /* FISTP m16i */
+ case 6: /* FBSTP */
+ case 7: /* FISTP m64i */
+ return true;
+ }
+ break;
+
+ case X86EMUL_OPC(0x0f, 0x01):
+ return !(state->modrm_reg & 6); /* SGDT / SIDT */
+
+ case X86EMUL_OPC(0x0f, 0xba):
+ return (state->modrm_reg & 7) > 4; /* BTS / BTR / BTC */
+ }
+
+ return false;
+}
+
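+/* Does the decoded instruction access an I/O port (IN/OUT/INS/OUTS)? */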
+bool
+x86_insn_is_portio(const struct x86_emulate_state *state,
+ const struct x86_emulate_ctxt *ctxt)
+{
+ switch ( ctxt->opcode )
+ {
+ case 0x6c ... 0x6f: /* INS / OUTS */
+ case 0xe4 ... 0xe7: /* IN / OUT imm8 */
+ case 0xec ... 0xef: /* IN / OUT %dx */
+ return true;
+ }
+
+ return false;
+}
+
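+/* Does the decoded instruction read or write control register state? */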
+bool
+x86_insn_is_cr_access(const struct x86_emulate_state *state,
+ const struct x86_emulate_ctxt *ctxt)
+{
+ switch ( ctxt->opcode )
+ {
+ unsigned int ext;
+
+ case X86EMUL_OPC(0x0f, 0x01):
+ if ( x86_insn_modrm(state, NULL, &ext) >= 0
+ && (ext & 5) == 4 ) /* SMSW / LMSW */
+ return true;
+ break;
+
+ case X86EMUL_OPC(0x0f, 0x06): /* CLTS */
+ case X86EMUL_OPC(0x0f, 0x20): /* MOV from CRn */
+ case X86EMUL_OPC(0x0f, 0x22): /* MOV to CRn */
+ return true;
+ }
+
+ return false;
+}
+
unsigned long
x86_insn_immediate(const struct x86_emulate_state *state, unsigned int nr)
{
unsigned int
x86_insn_length(const struct x86_emulate_state *state,
const struct x86_emulate_ctxt *ctxt);
+bool
+x86_insn_is_mem_access(const struct x86_emulate_state *state,
+ const struct x86_emulate_ctxt *ctxt);
+bool
+x86_insn_is_mem_write(const struct x86_emulate_state *state,
+ const struct x86_emulate_ctxt *ctxt);
+bool
+x86_insn_is_portio(const struct x86_emulate_state *state,
+ const struct x86_emulate_ctxt *ctxt);
+bool
+x86_insn_is_cr_access(const struct x86_emulate_state *state,
+ const struct x86_emulate_ctxt *ctxt);
#ifdef NDEBUG
static inline void x86_emulate_free_state(struct x86_emulate_state *state) {}
#include <asm/hvm/hvm.h>
#include <asm/x86_emulate.h>
+typedef bool hvm_emulate_validate_t(const struct x86_emulate_state *state,
+ const struct x86_emulate_ctxt *ctxt);
+
struct hvm_emulate_ctxt {
struct x86_emulate_ctxt ctxt;
+ /*
+ * validate: Post-decode, pre-emulate hook to allow caller-controlled
+ * filtering.
+ */
+ hvm_emulate_validate_t *validate;
+
/* Cache of 16 bytes of instruction. */
uint8_t insn_buf[16];
unsigned long insn_buf_eip;
EMUL_KIND_SET_CONTEXT_INSN
};
+bool __nonnull(1) hvm_emulate_one_insn(
+ hvm_emulate_validate_t *validate);
int hvm_emulate_one(
struct hvm_emulate_ctxt *hvmemul_ctxt);
void hvm_emulate_one_vm_event(enum emul_kind kind,
/* Must be called once to set up hvmemul state. */
void hvm_emulate_init_once(
struct hvm_emulate_ctxt *hvmemul_ctxt,
+ hvm_emulate_validate_t *validate,
struct cpu_user_regs *regs);
/* Must be called once before each instruction emulated. */
void hvm_emulate_init_per_insn(
struct hvm_emulate_ctxt *hvmemul_ctxt);
int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla);
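+/*
+ * handle_mmio() is now just a wrapper around hvm_emulate_one_insn(),
+ * applying the generic memory-access filter.
+ */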
+static inline bool handle_mmio(void)
+{
+ return hvm_emulate_one_insn(x86_insn_is_mem_access);
+}
+
int hvmemul_insn_fetch(enum x86_segment seg,
unsigned long offset,
void *p_data,
void send_timeoffset_req(unsigned long timeoff);
void send_invalidate_req(void);
-bool handle_mmio(void);
bool handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
struct npfec);
bool handle_pio(uint16_t port, unsigned int size, int dir);