}
static int fuzz_insn_fetch(
- enum x86_segment seg,
unsigned long offset,
void *p_data,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
{
- assert(seg == x86_seg_cs);
-
/* Minimal segment limit checking, until full one is being put in place. */
if ( ctxt->addr_size < 64 && (offset >> 32) )
{
return maybe_fail(ctxt, "insn_fetch", true);
}
- return data_read(ctxt, seg, "insn_fetch", p_data, bytes);
+ return data_read(ctxt, x86_seg_cs, "insn_fetch", p_data, bytes);
}
static int _fuzz_rep_read(struct x86_emulate_ctxt *ctxt,
void do_test(uint8_t *instr, unsigned int len, unsigned int modrm,
enum mem_access mem, struct x86_emulate_ctxt *ctxt,
- int (*fetch)(enum x86_segment seg,
- unsigned long offset,
+ int (*fetch)(unsigned long offset,
void *p_data,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt))
}
void predicates_test(void *instr, struct x86_emulate_ctxt *ctxt,
- int (*fetch)(enum x86_segment seg,
- unsigned long offset,
+ int (*fetch)(unsigned long offset,
void *p_data,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt))
}
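+/* Test harness fetch hook: instruction bytes live directly at @offset. */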
static int fetch(
- enum x86_segment seg,
unsigned long offset,
void *p_data,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
{
if ( verbose )
- printf("** %s(%u, %p,, %u,)\n", __func__, seg, (void *)offset, bytes);
+ printf("** %s(CS:%p,, %u,)\n", __func__, (void *)offset, bytes);
memcpy(p_data, (void *)offset, bytes);
return X86EMUL_OKAY;
void evex_disp8_test(void *instr, struct x86_emulate_ctxt *ctxt,
const struct x86_emulate_ops *ops);
void predicates_test(void *instr, struct x86_emulate_ctxt *ctxt,
- int (*fetch)(enum x86_segment seg,
- unsigned long offset,
+ int (*fetch)(unsigned long offset,
void *p_data,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt));
}
int hvmemul_insn_fetch(
- enum x86_segment seg,
unsigned long offset,
void *p_data,
unsigned int bytes,
if ( !bytes ||
unlikely((insn_off + bytes) > hvmemul_ctxt->insn_buf_bytes) )
{
- int rc = __hvmemul_read(seg, offset, p_data, bytes,
+ int rc = __hvmemul_read(x86_seg_cs, offset, p_data, bytes,
hvm_access_insn_fetch, hvmemul_ctxt);
if ( rc == X86EMUL_OKAY && bytes )
}
static int
-hvm_emulate_insn_fetch(enum x86_segment seg,
- unsigned long offset,
+hvm_emulate_insn_fetch(unsigned long offset,
void *p_data,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
container_of(ctxt, struct sh_emulate_ctxt, ctxt);
unsigned int insn_off = offset - sh_ctxt->insn_buf_eip;
- ASSERT(seg == x86_seg_cs);
-
/* Fall back if requested bytes are not in the prefetch cache. */
if ( unlikely((insn_off + bytes) > sh_ctxt->insn_buf_bytes) )
- return hvm_read(seg, offset, p_data, bytes,
+ return hvm_read(x86_seg_cs, offset, p_data, bytes,
hvm_access_insn_fetch, sh_ctxt);
/* Hit the cache. Simple memcpy. */
return X86EMUL_OKAY;
}
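+/* Segment-less insn_fetch hook: gate op fetches are always from CS. */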
+static int fetch(unsigned long offset, void *p_data,
+ unsigned int bytes, struct x86_emulate_ctxt *ctxt)
+{
+ return read_mem(x86_seg_cs, offset, p_data, bytes, ctxt);
+}
+
void pv_emulate_gate_op(struct cpu_user_regs *regs)
{
struct vcpu *v = current;
ctxt.ctxt.addr_size = ar & _SEGMENT_DB ? 32 : 16;
/* Leave zero in ctxt.ctxt.sp_size, as it's not needed for decoding. */
- state = x86_decode_insn(&ctxt.ctxt, read_mem);
+ state = x86_decode_insn(&ctxt.ctxt, fetch);
ctxt.insn_fetch = false;
if ( IS_ERR_OR_NULL(state) )
{
return X86EMUL_UNHANDLEABLE;
}
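+/* insn_fetch hook: the linear address is the guest's CS base plus @offset. */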
-static int insn_fetch(enum x86_segment seg,
- unsigned long offset,
+static int insn_fetch(unsigned long offset,
void *p_data,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
unsigned int rc;
unsigned long addr = poc->cs.base + offset;
- ASSERT(seg == x86_seg_cs);
-
/* We don't mean to emulate any branches. */
if ( !bytes )
return X86EMUL_UNHANDLEABLE;
return X86EMUL_OKAY;
}
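+/*
+ * Dedicated insn_fetch hook, so that a failed fetch can raise #PF with
+ * PFEC_insn_fetch set in the error code.
+ */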
+static int ptwr_emulated_insn_fetch(unsigned long offset,
+ void *p_data, unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt)
+{
+ unsigned int rc = copy_from_guest_pv(p_data, (void *)offset, bytes);
+
+ if ( rc )
+ {
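+        /* rc is the number of bytes left uncopied; fault at the first one. */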
+ x86_emul_pagefault(PFEC_insn_fetch, offset + bytes - rc, ctxt);
+ return X86EMUL_EXCEPTION;
+ }
+
+ return X86EMUL_OKAY;
+}
+
/*
* p_old being NULL indicates a plain write to occur, while a non-NULL
* input requests a CMPXCHG-based update.
static const struct x86_emulate_ops ptwr_emulate_ops = {
.read = ptwr_emulated_read,
- .insn_fetch = ptwr_emulated_read,
+ .insn_fetch = ptwr_emulated_insn_fetch,
.write = ptwr_emulated_write,
.cmpxchg = ptwr_emulated_cmpxchg,
.validate = pv_emul_is_mem_write,
static const struct x86_emulate_ops mmio_ro_emulate_ops = {
.read = x86emul_unhandleable_rw,
- .insn_fetch = ptwr_emulated_read,
+ .insn_fetch = ptwr_emulated_insn_fetch,
.write = mmio_ro_emulated_write,
.validate = pv_emul_is_mem_write,
};
static const struct x86_emulate_ops mmcfg_intercept_ops = {
.read = x86emul_unhandleable_rw,
- .insn_fetch = ptwr_emulated_read,
+ .insn_fetch = ptwr_emulated_insn_fetch,
.write = mmcfg_intercept_write,
.validate = pv_emul_is_mem_write,
};
generate_exception_if((uint8_t)(state->ip - \
ctxt->regs->r(ip)) > MAX_INST_LEN, \
EXC_GP, 0); \
- rc = ops->insn_fetch(x86_seg_cs, _ip, &_x, (_size), ctxt); \
+ rc = ops->insn_fetch(_ip, &_x, (_size), ctxt); \
if ( rc ) goto done; \
_x; \
})
ip = (uint16_t)ip; \
else if ( !mode_64bit() ) \
ip = (uint32_t)ip; \
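+    /* A zero-length fetch only validates that the new IP is fetchable. */ \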
- rc = ops->insn_fetch(x86_seg_cs, ip, NULL, 0, ctxt); \
+ rc = ops->insn_fetch(ip, NULL, 0, ctxt); \
if ( rc ) goto done; \
_regs.r(ip) = ip; \
singlestep = _regs.eflags & X86_EFLAGS_TF; \
? 8 : op_bytes;
if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes + src.val),
&dst.val, op_bytes, ctxt, ops)) != 0 ||
- (rc = ops->insn_fetch(x86_seg_cs, dst.val, NULL, 0, ctxt)) )
+ (rc = ops->insn_fetch(dst.val, NULL, 0, ctxt)) )
goto done;
_regs.r(ip) = dst.val;
adjust_bnd(ctxt, ops, vex.pfx);
break;
case 2: /* call (near) */
dst.val = _regs.r(ip);
- if ( (rc = ops->insn_fetch(x86_seg_cs, src.val, NULL, 0, ctxt)) )
+ if ( (rc = ops->insn_fetch(src.val, NULL, 0, ctxt)) )
goto done;
_regs.r(ip) = src.val;
src.val = dst.val;
adjust_bnd(ctxt, ops, vex.pfx);
goto push;
case 4: /* jmp (near) */
- if ( (rc = ops->insn_fetch(x86_seg_cs, src.val, NULL, 0, ctxt)) )
+ if ( (rc = ops->insn_fetch(src.val, NULL, 0, ctxt)) )
goto done;
_regs.r(ip) = src.val;
dst.type = OP_NONE;
x86_decode_insn(
struct x86_emulate_ctxt *ctxt,
int (*insn_fetch)(
- enum x86_segment seg, unsigned long offset,
- void *p_data, unsigned int bytes,
+ unsigned long offset, void *p_data, unsigned int bytes,
struct x86_emulate_ctxt *ctxt))
{
static DEFINE_PER_CPU(struct x86_emulate_state, state);
/*
* insn_fetch: Emulate fetch from instruction byte stream.
- * Except for @bytes, all parameters are the same as for 'read'.
+ * Except for @bytes and the absence of @seg (the segment is always CS),
+ * all parameters are the same as for 'read'.
* @bytes: Access length (0 <= @bytes < 16, with zero meaning
* "validate address only").
- * @seg is always x86_seg_cs.
*/
int (*insn_fetch)(
- enum x86_segment seg,
unsigned long offset,
void *p_data,
unsigned int bytes,
x86_decode_insn(
struct x86_emulate_ctxt *ctxt,
int (*insn_fetch)(
- enum x86_segment seg, unsigned long offset,
- void *p_data, unsigned int bytes,
+ unsigned long offset, void *p_data, unsigned int bytes,
struct x86_emulate_ctxt *ctxt));
unsigned int
return hvm_emulate_one_insn(x86_insn_is_mem_access, "MMIO");
}
-int hvmemul_insn_fetch(enum x86_segment seg,
- unsigned long offset,
+int hvmemul_insn_fetch(unsigned long offset,
void *p_data,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt);