static int hvmemul_do_io(
int is_mmio, paddr_t addr, unsigned long *reps, int size,
- paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val)
+ paddr_t ram_gpa, int dir, int df, void *p_data)
{
+ paddr_t value = ram_gpa;
+ int value_is_ptr = (p_data == NULL);
struct vcpu *curr = current;
vcpu_iodata_t *vio = get_ioreq(curr);
ioreq_t *p = &vio->vp_ioreq;
int rc;
- /* Only retrieve the value from singleton (non-REP) reads. */
- ASSERT((val == NULL) || ((dir == IOREQ_READ) && !value_is_ptr));
+ /*
+ * Weird-sized accesses have undefined behaviour: we discard writes
+ * and read all-ones.
+ */
+ if ( unlikely((size > sizeof(long)) || (size & (size - 1))) )
+ {
+ gdprintk(XENLOG_WARNING, "bad mmio size %d\n", size);
+ ASSERT(p_data != NULL); /* cannot happen with a REP prefix */
+ if ( dir == IOREQ_READ )
+ memset(p_data, ~0, size);
+ return X86EMUL_UNHANDLEABLE;
+ }
+
+ if ( (p_data != NULL) && (dir == IOREQ_WRITE) )
+ {
+ memcpy(&value, p_data, size);
+ p_data = NULL;
+ }
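A note on the convention the new prologue establishes: p_data != NULL
means the datum itself travels through the emulator (singleton
accesses), while p_data == NULL means the data lives in guest RAM at
ram_gpa (the REP string case). A stand-alone sketch of the two checks
above; demo_prologue() is invented for illustration and is not part of
the patch:

    #include <string.h>

    static int demo_prologue(void *p_data, int size, int is_write,
                             unsigned long *value /* datum sent inline */)
    {
        /* A power of two shares no set bit with (itself - 1), so this
         * rejects 3-, 5-, 6-, 7-byte and oversized accesses. */
        if ( (size > (int)sizeof(long)) || (size & (size - 1)) )
            return -1;                    /* X86EMUL_UNHANDLEABLE */
        if ( (p_data != NULL) && is_write )
            memcpy(value, p_data, size);  /* fold datum into the ioreq */
        return 0;
    }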
if ( is_mmio && !value_is_ptr )
{
unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_read_bytes;
if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
{
- *val = 0;
- memcpy(val, &curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
+ memcpy(p_data, &curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
size);
return X86EMUL_OKAY;
}
break;
case HVMIO_completed:
curr->arch.hvm_vcpu.io_state = HVMIO_none;
- if ( val == NULL )
+ if ( p_data == NULL )
return X86EMUL_UNHANDLEABLE;
goto finish_access;
case HVMIO_dispatched:
}
curr->arch.hvm_vcpu.io_state =
- (val == NULL) ? HVMIO_dispatched : HVMIO_awaiting_completion;
+ (p_data == NULL) ? HVMIO_dispatched : HVMIO_awaiting_completion;
p->dir = dir;
p->data_is_ptr = value_is_ptr;
break;
case X86EMUL_UNHANDLEABLE:
hvm_send_assist_req(curr);
- rc = (val != NULL) ? X86EMUL_RETRY : X86EMUL_OKAY;
+ rc = (p_data != NULL) ? X86EMUL_RETRY : X86EMUL_OKAY;
break;
default:
BUG();
return rc;
finish_access:
- if ( val != NULL )
- *val = curr->arch.hvm_vcpu.io_data;
+ if ( p_data != NULL )
+ memcpy(p_data, &curr->arch.hvm_vcpu.io_data, size);
if ( is_mmio && !value_is_ptr )
{
sizeof(curr->arch.hvm_vcpu.mmio_large_read)) )
{
memcpy(&curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
- val, size);
+ p_data, size);
curr->arch.hvm_vcpu.mmio_large_read_bytes += size;
}
}
static int hvmemul_do_pio(
unsigned long port, unsigned long *reps, int size,
- paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val)
+ paddr_t ram_gpa, int dir, int df, void *p_data)
{
- return hvmemul_do_io(0, port, reps, size, value,
- dir, df, value_is_ptr, val);
+ return hvmemul_do_io(0, port, reps, size, ram_gpa, dir, df, p_data);
}
static int hvmemul_do_mmio(
paddr_t gpa, unsigned long *reps, int size,
- paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val)
+ paddr_t ram_gpa, int dir, int df, void *p_data)
{
- return hvmemul_do_io(1, gpa, reps, size, value,
- dir, df, value_is_ptr, val);
+ return hvmemul_do_io(1, gpa, reps, size, ram_gpa, dir, df, p_data);
}
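With both wrappers converted, callers select the mode purely via the
final argument: a buffer pointer for singleton accesses (ram_gpa left
as 0), or NULL with the guest-RAM address in ram_gpa for REP string
operations. Hypothetical call shapes, with the port number and counts
invented for illustration:

    uint16_t data;
    unsigned long one = 1, nr = 16;

    /* Singleton 2-byte IN: the result is copied into 'data'. */
    rc = hvmemul_do_pio(0x60, &one, 2, 0, IOREQ_READ, 0, &data);

    /* REP OUTSW: 16 words streamed from guest RAM at 'gpa'. */
    rc = hvmemul_do_pio(0x60, &nr, 2, gpa, IOREQ_WRITE, df, NULL);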
/*
static int __hvmemul_read(
enum x86_segment seg,
unsigned long offset,
- unsigned long *val,
+ void *p_data,
unsigned int bytes,
enum hvm_access_type access_type,
struct hvm_emulate_ctxt *hvmemul_ctxt)
if ( rc != X86EMUL_OKAY )
return rc;
- *val = 0;
-
if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
curr->arch.hvm_vcpu.mmio_gva )
{
gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
if ( (off + bytes) <= PAGE_SIZE )
return hvmemul_do_mmio(gpa, &reps, bytes, 0,
- IOREQ_READ, 0, 0, val);
+ IOREQ_READ, 0, p_data);
}
if ( (seg != x86_seg_none) &&
pfec |= PFEC_user_mode;
rc = ((access_type == hvm_access_insn_fetch) ?
- hvm_fetch_from_guest_virt(val, addr, bytes, pfec) :
- hvm_copy_from_guest_virt(val, addr, bytes, pfec));
+ hvm_fetch_from_guest_virt(p_data, addr, bytes, pfec) :
+ hvm_copy_from_guest_virt(p_data, addr, bytes, pfec));
if ( rc == HVMCOPY_bad_gva_to_gfn )
return X86EMUL_EXCEPTION;
if ( rc == HVMCOPY_bad_gfn_to_mfn )
{
- unsigned long reps = 1;
-
if ( access_type == hvm_access_insn_fetch )
return X86EMUL_UNHANDLEABLE;
if ( rc != X86EMUL_OKAY )
return rc;
- return hvmemul_do_mmio(gpa, &reps, bytes, 0, IOREQ_READ, 0, 0, val);
+ return hvmemul_do_mmio(gpa, &reps, bytes, 0, IOREQ_READ, 0, p_data);
}
return X86EMUL_OKAY;
static int hvmemul_read(
enum x86_segment seg,
unsigned long offset,
- unsigned long *val,
+ void *p_data,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
{
return __hvmemul_read(
- seg, offset, val, bytes, hvm_access_read,
+ seg, offset, p_data, bytes, hvm_access_read,
container_of(ctxt, struct hvm_emulate_ctxt, ctxt));
}
static int hvmemul_insn_fetch(
enum x86_segment seg,
unsigned long offset,
- unsigned long *val,
+ void *p_data,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
{
/* Fall back if requested bytes are not in the prefetch cache. */
if ( unlikely((insn_off + bytes) > hvmemul_ctxt->insn_buf_bytes) )
return __hvmemul_read(
- seg, offset, val, bytes,
+ seg, offset, p_data, bytes,
hvm_access_insn_fetch, hvmemul_ctxt);
/* Hit the cache. Simple memcpy. */
- *val = 0;
- memcpy(val, &hvmemul_ctxt->insn_buf[insn_off], bytes);
+ memcpy(p_data, &hvmemul_ctxt->insn_buf[insn_off], bytes);
return X86EMUL_OKAY;
}
static int hvmemul_write(
enum x86_segment seg,
unsigned long offset,
- unsigned long val,
+ void *p_data,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
{
unsigned int off = addr & (PAGE_SIZE - 1);
gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
if ( (off + bytes) <= PAGE_SIZE )
- return hvmemul_do_mmio(gpa, &reps, bytes, val,
- IOREQ_WRITE, 0, 0, NULL);
+ return hvmemul_do_mmio(gpa, &reps, bytes, 0,
+ IOREQ_WRITE, 0, p_data);
}
if ( (seg != x86_seg_none) &&
(hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
pfec |= PFEC_user_mode;
- rc = hvm_copy_to_guest_virt(addr, &val, bytes, pfec);
+ rc = hvm_copy_to_guest_virt(addr, p_data, bytes, pfec);
if ( rc == HVMCOPY_bad_gva_to_gfn )
return X86EMUL_EXCEPTION;
if ( rc == HVMCOPY_bad_gfn_to_mfn )
{
- unsigned long reps = 1;
-
rc = hvmemul_linear_to_phys(
addr, &gpa, bytes, &reps, pfec, hvmemul_ctxt);
if ( rc != X86EMUL_OKAY )
return rc;
- return hvmemul_do_mmio(gpa, &reps, bytes, val,
- IOREQ_WRITE, 0, 0, NULL);
+ return hvmemul_do_mmio(gpa, &reps, bytes, 0,
+ IOREQ_WRITE, 0, p_data);
}
return X86EMUL_OKAY;
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
{
- unsigned long new = 0;
- if ( bytes > sizeof(new) )
- return X86EMUL_UNHANDLEABLE;
- memcpy(&new, p_new, bytes);
/* Fix this in case the guest is really relying on r-m-w atomicity. */
- return hvmemul_write(seg, offset, new, bytes, ctxt);
+ return hvmemul_write(seg, offset, p_new, bytes, ctxt);
}
static int hvmemul_rep_ins(
return rc;
return hvmemul_do_pio(src_port, reps, bytes_per_rep, gpa, IOREQ_READ,
- !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
+ !!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
}
static int hvmemul_rep_outs(
return rc;
return hvmemul_do_pio(dst_port, reps, bytes_per_rep, gpa, IOREQ_WRITE,
- !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
+ !!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
}
static int hvmemul_rep_movs(
if ( !p2m_is_ram(p2mt) )
return hvmemul_do_mmio(
sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ,
- !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
+ !!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
(void)gfn_to_mfn_current(dgpa >> PAGE_SHIFT, &p2mt);
if ( p2m_is_ram(p2mt) )
return X86EMUL_UNHANDLEABLE;
return hvmemul_do_mmio(
dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE,
- !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
+ !!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
}
static int hvmemul_read_segment(
struct x86_emulate_ctxt *ctxt)
{
unsigned long reps = 1;
- return hvmemul_do_pio(port, &reps, bytes, 0, IOREQ_READ, 0, 0, val);
+ *val = 0;
+ return hvmemul_do_pio(port, &reps, bytes, 0, IOREQ_READ, 0, val);
}
static int hvmemul_write_io(
struct x86_emulate_ctxt *ctxt)
{
unsigned long reps = 1;
- return hvmemul_do_pio(port, &reps, bytes, val, IOREQ_WRITE, 0, 0, NULL);
+ return hvmemul_do_pio(port, &reps, bytes, 0, IOREQ_WRITE, 0, &val);
}
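Passing &val where a by-value paddr_t used to go is safe because
hvmemul_do_io() now memcpy()s only the low 'size' bytes from the
buffer, and on x86 (little-endian) those bytes are exactly the
truncated value. A quick stand-alone check of that assumption:

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
        unsigned long val = 0x11223344UL;
        unsigned short low16;
        /* On little-endian, the first two bytes of a ulong hold
         * the value modulo 2^16. */
        memcpy(&low16, &val, sizeof(low16));
        assert(low16 == 0x3344);
        return 0;
    }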
static int hvmemul_read_cr(
/* Fetch next part of the instruction being emulated. */
#define insn_fetch_bytes(_size) \
-({ unsigned long _x, _eip = _regs.eip; \
+({ unsigned long _x = 0, _eip = _regs.eip; \
if ( !mode_64bit() ) _eip = (uint32_t)_eip; /* ignore upper dword */ \
_regs.eip += (_size); /* real hardware doesn't truncate */ \
generate_exception_if((uint8_t)(_regs.eip - ctxt->regs->eip) > 15, \
__put_rep_prefix(&_regs, ctxt->regs, ad_bytes, reps_completed); \
})
+/* Compatibility function: read guest memory, zero-extend result to a ulong. */
+static int read_ulong(
+ enum x86_segment seg,
+ unsigned long offset,
+ unsigned long *val,
+ unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops)
+{
+ *val = 0;
+ return ops->read(seg, offset, val, bytes, ctxt);
+}
+
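Because ops->read() and ops->insn_fetch() now fill only 'bytes' bytes
of the destination, any caller loading a short operand into a full
unsigned long must pre-zero it; read_ulong() packages that idiom (and
the same reasoning is why insn_fetch_bytes() above gained its
'_x = 0' initialiser). A stand-alone demo of the failure mode this
avoids; fake_read() is an invented stand-in for the new callback
behaviour:

    #include <stdio.h>
    #include <string.h>

    /* Like the new ops->read(): writes only 'bytes' bytes. */
    static void fake_read(void *p_data, unsigned int bytes)
    {
        unsigned char two[2] = { 0x34, 0x12 };
        memcpy(p_data, two, bytes);
    }

    int main(void)
    {
        unsigned long val = 0xdeadbeefUL;  /* stale prior contents */
        fake_read(&val, 2);
        printf("raw:    %#lx\n", val);     /* 0xdead1234: upper bytes stale */
        val = 0;                           /* what read_ulong() does first */
        fake_read(&val, 2);
        printf("zeroed: %#lx\n", val);     /* 0x1234: zero-extended */
        return 0;
    }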
/*
* Unsigned multiplication with double-word result.
* IN: Multiplicand=m[0], Multiplier=m[1]
(tr.limit < 0x67) )
goto raise_exception;
- if ( (rc = ops->read(x86_seg_none, tr.base + 0x66, &iobmp, 2, ctxt)) )
+ if ( (rc = read_ulong(x86_seg_none, tr.base + 0x66,
+ &iobmp, 2, ctxt, ops)) )
return rc;
/* Ensure TSS includes two bytes including byte containing first port. */
if ( tr.limit <= iobmp )
goto raise_exception;
- if ( (rc = ops->read(x86_seg_none, tr.base + iobmp, &iobmp, 2, ctxt)) )
+ if ( (rc = read_ulong(x86_seg_none, tr.base + iobmp,
+ &iobmp, 2, ctxt, ops)) )
return rc;
if ( (iobmp & (((1<<bytes)-1) << (first_port&7))) != 0 )
goto raise_exception;
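For context on the unchanged test just above: the code reads the
16-bit chunk of the TSS I/O bitmap covering first_port, then requires
one clear bit per byte of the access. ((1<<bytes)-1) builds a run of
'bytes' one-bits, shifted to the port's bit position within its byte.
Worked example with an invented port number:

    /* 4-byte access at port 0x3fa: bit offset 0x3fa & 7 == 2. */
    unsigned int mask = ((1u << 4) - 1) << (0x3fa & 7);
    /* mask == 0xf << 2 == 0x3c: ports 0x3fa..0x3fd must all be
     * permitted (their iobmp bits clear), else the access faults. */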
goto raise_exn;
do {
- if ( (rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8),
- &val, 4, ctxt)) )
+ if ( (rc = read_ulong(x86_seg_none, desctab.base + (sel & 0xfff8),
+ &val, 4, ctxt, ops)) )
return rc;
desc.a = val;
- if ( (rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8) + 4,
- &val, 4, ctxt)) )
+ if ( (rc = read_ulong(x86_seg_none, desctab.base + (sel & 0xfff8) + 4,
+ &val, 4, ctxt, ops)) )
return rc;
desc.b = val;
case 8: src.val = *(uint64_t *)src.reg; break;
}
}
- else if ( (rc = ops->read(src.mem.seg, src.mem.off,
- &src.val, src.bytes, ctxt)) )
+ else if ( (rc = read_ulong(src.mem.seg, src.mem.off,
+ &src.val, src.bytes, ctxt, ops)) )
goto done;
break;
case SrcImm:
}
else if ( !(d & Mov) ) /* optimisation - avoid slow emulated read */
{
- if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
- &dst.val, dst.bytes, ctxt)) )
+ if ( (rc = read_ulong(dst.mem.seg, dst.mem.off,
+ &dst.val, dst.bytes, ctxt, ops)) )
goto done;
dst.orig_val = dst.val;
}
int lb, ub, idx;
generate_exception_if(mode_64bit() || (src.type != OP_MEM),
EXC_UD, -1);
- if ( (rc = ops->read(src.mem.seg, src.mem.off + op_bytes,
- &src_val2, op_bytes, ctxt)) )
+ if ( (rc = read_ulong(src.mem.seg, src.mem.off + op_bytes,
+ &src_val2, op_bytes, ctxt, ops)) )
goto done;
ub = (op_bytes == 2) ? (int16_t)src_val2 : (int32_t)src_val2;
lb = (op_bytes == 2) ? (int16_t)src.val : (int32_t)src.val;
/* movsxd */
if ( src.type == OP_REG )
src.val = *(int32_t *)src.reg;
- else if ( (rc = ops->read(src.mem.seg, src.mem.off,
- &src.val, 4, ctxt)) )
+ else if ( (rc = read_ulong(src.mem.seg, src.mem.off,
+ &src.val, 4, ctxt, ops)) )
goto done;
dst.val = (int32_t)src.val;
}
unsigned long src1; /* ModR/M source operand */
if ( ea.type == OP_REG )
src1 = *ea.reg;
- else if ( (rc = ops->read(ea.mem.seg, ea.mem.off,
- &src1, op_bytes, ctxt)) )
+ else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off,
+ &src1, op_bytes, ctxt, ops)) )
goto done;
_regs.eflags &= ~(EFLG_OF|EFLG_CF);
switch ( dst.bytes )
/* 64-bit mode: POP defaults to a 64-bit operand. */
if ( mode_64bit() && (dst.bytes == 4) )
dst.bytes = 8;
- if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes),
- &dst.val, dst.bytes, ctxt)) != 0 )
+ if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes),
+ &dst.val, dst.bytes, ctxt, ops)) != 0 )
goto done;
break;
dst.val = x86_seg_es;
les: /* dst.val identifies the segment */
generate_exception_if(src.type != OP_MEM, EXC_UD, -1);
- if ( (rc = ops->read(src.mem.seg, src.mem.off + src.bytes,
- &sel, 2, ctxt)) != 0 )
+ if ( (rc = read_ulong(src.mem.seg, src.mem.off + src.bytes,
+ &sel, 2, ctxt, ops)) != 0 )
goto done;
if ( (rc = load_seg(dst.val, (uint16_t)sel, ctxt, ops)) != 0 )
goto done;
dst.bytes = op_bytes = 8;
if ( dst.type == OP_REG )
dst.val = *dst.reg;
- else if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
- &dst.val, 8, ctxt)) != 0 )
+ else if ( (rc = read_ulong(dst.mem.seg, dst.mem.off,
+ &dst.val, 8, ctxt, ops)) != 0 )
goto done;
}
src.val = _regs.eip;
generate_exception_if(dst.type != OP_MEM, EXC_UD, -1);
- if ( (rc = ops->read(dst.mem.seg, dst.mem.off+dst.bytes,
- &sel, 2, ctxt)) )
+ if ( (rc = read_ulong(dst.mem.seg, dst.mem.off+dst.bytes,
+ &sel, 2, ctxt, ops)) )
goto done;
if ( (modrm_reg & 7) == 3 ) /* call */
fail_if(ops->read_segment == NULL);
if ( (rc = ops->read_segment(x86_seg_cs, &reg, ctxt)) ||
(rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
- reg.sel, op_bytes, ctxt)) ||
+ &reg.sel, op_bytes, ctxt)) ||
(rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
- _regs.eip, op_bytes, ctxt)) )
+ &_regs.eip, op_bytes, ctxt)) )
goto done;
}
dst.bytes = 8;
if ( dst.type == OP_REG )
dst.val = *dst.reg;
- else if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
- &dst.val, 8, ctxt)) != 0 )
+ else if ( (rc = read_ulong(dst.mem.seg, dst.mem.off,
+ &dst.val, 8, ctxt, ops)) != 0 )
goto done;
}
if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
- dst.val, dst.bytes, ctxt)) != 0 )
+ &dst.val, dst.bytes, ctxt)) != 0 )
goto done;
dst.type = OP_NONE;
break;
&dst.val, dst.bytes, ctxt);
else
rc = ops->write(
- dst.mem.seg, dst.mem.off, dst.val, dst.bytes, ctxt);
+ dst.mem.seg, dst.mem.off, &dst.val, dst.bytes, ctxt);
if ( rc != 0 )
goto done;
default:
if ( mode_64bit() && (op_bytes == 4) )
op_bytes = 8;
if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
- reg.sel, op_bytes, ctxt)) != 0 )
+ &reg.sel, op_bytes, ctxt)) != 0 )
goto done;
break;
}
/* 64-bit mode: POP defaults to a 64-bit operand. */
if ( mode_64bit() && (op_bytes == 4) )
op_bytes = 8;
- if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
- &dst.val, op_bytes, ctxt)) != 0 )
+ if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+ &dst.val, op_bytes, ctxt, ops)) != 0 )
goto done;
if ( (rc = load_seg(src.val, (uint16_t)dst.val, ctxt, ops)) != 0 )
return rc;
dst.bytes = op_bytes;
if ( mode_64bit() && (dst.bytes == 4) )
dst.bytes = 8;
- if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes),
- &dst.val, dst.bytes, ctxt)) != 0 )
+ if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes),
+ &dst.val, dst.bytes, ctxt, ops)) != 0 )
goto done;
break;
generate_exception_if(mode_64bit(), EXC_UD, -1);
for ( i = 0; i < 8; i++ )
if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
- regs[i], op_bytes, ctxt)) != 0 )
+ &regs[i], op_bytes, ctxt)) != 0 )
goto done;
break;
}
generate_exception_if(mode_64bit(), EXC_UD, -1);
for ( i = 0; i < 8; i++ )
{
- if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
- &dst.val, op_bytes, ctxt)) != 0 )
+ if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+ &dst.val, op_bytes, ctxt, ops)) != 0 )
goto done;
switch ( op_bytes )
{
}
else
{
- if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi),
- &dst.val, dst.bytes, ctxt)) != 0 )
+ if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.esi),
+ &dst.val, dst.bytes, ctxt, ops)) != 0 )
goto done;
fail_if(ops->write_io == NULL);
if ( (rc = ops->write_io(port, dst.bytes, dst.val, ctxt)) != 0 )
if ( (rc = ops->read_segment(x86_seg_cs, &reg, ctxt)) ||
(rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
- reg.sel, op_bytes, ctxt)) ||
+ &reg.sel, op_bytes, ctxt)) ||
(rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
- _regs.eip, op_bytes, ctxt)) )
+ &_regs.eip, op_bytes, ctxt)) )
goto done;
if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
/* 64-bit mode: POP defaults to a 64-bit operand. */
if ( mode_64bit() && (op_bytes == 4) )
op_bytes = 8;
- if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
- &dst.val, op_bytes, ctxt)) != 0 )
+ if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+ &dst.val, op_bytes, ctxt, ops)) != 0 )
goto done;
if ( op_bytes == 2 )
dst.val = (uint16_t)dst.val | (_regs.eflags & 0xffff0000u);
dst.type = OP_REG;
dst.reg = (unsigned long *)&_regs.eax;
dst.bytes = (d & ByteOp) ? 1 : op_bytes;
- if ( (rc = ops->read(ea.mem.seg, insn_fetch_bytes(ad_bytes),
- &dst.val, dst.bytes, ctxt)) != 0 )
+ if ( (rc = read_ulong(ea.mem.seg, insn_fetch_bytes(ad_bytes),
+ &dst.val, dst.bytes, ctxt, ops)) != 0 )
goto done;
break;
}
else
{
- if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi),
- &dst.val, dst.bytes, ctxt)) != 0 )
+ if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.esi),
+ &dst.val, dst.bytes, ctxt, ops)) != 0 )
goto done;
dst.type = OP_MEM;
nr_reps = 1;
unsigned long next_eip = _regs.eip;
get_rep_prefix();
src.bytes = dst.bytes = (d & ByteOp) ? 1 : op_bytes;
- if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi),
- &dst.val, dst.bytes, ctxt)) ||
- (rc = ops->read(x86_seg_es, truncate_ea(_regs.edi),
- &src.val, src.bytes, ctxt)) )
+ if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.esi),
+ &dst.val, dst.bytes, ctxt, ops)) ||
+ (rc = read_ulong(x86_seg_es, truncate_ea(_regs.edi),
+ &src.val, src.bytes, ctxt, ops)) )
goto done;
register_address_increment(
_regs.esi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
dst.type = OP_REG;
dst.bytes = (d & ByteOp) ? 1 : op_bytes;
dst.reg = (unsigned long *)&_regs.eax;
- if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi),
- &dst.val, dst.bytes, ctxt)) != 0 )
+ if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.esi),
+ &dst.val, dst.bytes, ctxt, ops)) != 0 )
goto done;
register_address_increment(
_regs.esi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
get_rep_prefix();
src.bytes = dst.bytes = (d & ByteOp) ? 1 : op_bytes;
dst.val = _regs.eax;
- if ( (rc = ops->read(x86_seg_es, truncate_ea(_regs.edi),
- &src.val, src.bytes, ctxt)) != 0 )
+ if ( (rc = read_ulong(x86_seg_es, truncate_ea(_regs.edi),
+ &src.val, src.bytes, ctxt, ops)) != 0 )
goto done;
register_address_increment(
_regs.edi, (_regs.eflags & EFLG_DF) ? -src.bytes : src.bytes);
case 0xc3: /* ret (near) */ {
int offset = (b == 0xc2) ? insn_fetch_type(uint16_t) : 0;
op_bytes = mode_64bit() ? 8 : op_bytes;
- if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes + offset),
- &dst.val, op_bytes, ctxt)) != 0 )
+ if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes + offset),
+ &dst.val, op_bytes, ctxt, ops)) != 0 )
goto done;
_regs.eip = dst.val;
break;
dst.bytes = (mode_64bit() && (op_bytes == 4)) ? 8 : op_bytes;
dst.reg = (unsigned long *)&_regs.ebp;
if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
- _regs.ebp, dst.bytes, ctxt)) )
+ &_regs.ebp, dst.bytes, ctxt)) )
goto done;
dst.val = _regs.esp;
{
unsigned long ebp, temp_data;
ebp = truncate_word(_regs.ebp - i*dst.bytes, ctxt->sp_size/8);
- if ( (rc = ops->read(x86_seg_ss, ebp,
- &temp_data, dst.bytes, ctxt)) ||
+ if ( (rc = read_ulong(x86_seg_ss, ebp,
+ &temp_data, dst.bytes, ctxt, ops)) ||
(rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
- temp_data, dst.bytes, ctxt)) )
+ &temp_data, dst.bytes, ctxt)) )
goto done;
}
if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
- dst.val, dst.bytes, ctxt)) )
+ &dst.val, dst.bytes, ctxt)) )
goto done;
}
/* Second writeback, to %%ebp. */
dst.reg = (unsigned long *)&_regs.ebp;
- if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes),
- &dst.val, dst.bytes, ctxt)) )
+ if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes),
+ &dst.val, dst.bytes, ctxt, ops)) )
goto done;
break;
case 0xcb: /* ret (far) */ {
int offset = (b == 0xca) ? insn_fetch_type(uint16_t) : 0;
op_bytes = mode_64bit() ? 8 : op_bytes;
- if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
- &dst.val, op_bytes, ctxt)) ||
- (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes + offset),
- &src.val, op_bytes, ctxt)) ||
+ if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+ &dst.val, op_bytes, ctxt, ops)) ||
+ (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes + offset),
+ &src.val, op_bytes, ctxt, ops)) ||
(rc = load_seg(x86_seg_cs, (uint16_t)src.val, ctxt, ops)) )
goto done;
_regs.eip = dst.val;
if ( !mode_iopl() )
mask |= EFLG_IF;
fail_if(!in_realmode(ctxt, ops));
- if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
- &eip, op_bytes, ctxt)) ||
- (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
- &cs, op_bytes, ctxt)) ||
- (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
- &eflags, op_bytes, ctxt)) )
+ if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+ &eip, op_bytes, ctxt, ops)) ||
+ (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+ &cs, op_bytes, ctxt, ops)) ||
+ (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+ &eflags, op_bytes, ctxt, ops)) )
goto done;
if ( op_bytes == 2 )
eflags = (uint16_t)eflags | (_regs.eflags & 0xffff0000u);
case 0xd7: /* xlat */ {
unsigned long al = (uint8_t)_regs.eax;
- if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.ebx + al),
- &al, 1, ctxt)) != 0 )
+ if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.ebx + al),
+ &al, 1, ctxt, ops)) != 0 )
goto done;
*(uint8_t *)&_regs.eax = al;
break;
if ( op_bytes == 2 )
reg.base &= 0xffffff;
if ( (rc = ops->write(ea.mem.seg, ea.mem.off+0,
- reg.limit, 2, ctxt)) ||
+ &reg.limit, 2, ctxt)) ||
(rc = ops->write(ea.mem.seg, ea.mem.off+2,
- reg.base, mode_64bit() ? 8 : 4, ctxt)) )
+ &reg.base, mode_64bit() ? 8 : 4, ctxt)) )
goto done;
break;
case 2: /* lgdt */
generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
fail_if(ops->write_segment == NULL);
memset(&reg, 0, sizeof(reg));
- if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0,
- &limit, 2, ctxt)) ||
- (rc = ops->read(ea.mem.seg, ea.mem.off+2,
- &base, mode_64bit() ? 8 : 4, ctxt)) )
+ if ( (rc = read_ulong(ea.mem.seg, ea.mem.off+0,
+ &limit, 2, ctxt, ops)) ||
+ (rc = read_ulong(ea.mem.seg, ea.mem.off+2,
+ &base, mode_64bit() ? 8 : 4, ctxt, ops)) )
goto done;
reg.base = base;
reg.limit = limit;
goto done;
if ( ea.type == OP_REG )
cr0w = *ea.reg;
- else if ( (rc = ops->read(ea.mem.seg, ea.mem.off,
- &cr0w, 2, ctxt)) )
+ else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off,
+ &cr0w, 2, ctxt, ops)) )
goto done;
/* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
cr0 = (cr0 & ~0xe) | (cr0w & 0xf);
if ( ea.type == OP_MEM )
{
unsigned long lval, hval;
- if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0, &lval, 4, ctxt)) ||
- (rc = ops->read(ea.mem.seg, ea.mem.off+4, &hval, 4, ctxt)) )
+ if ( (rc = read_ulong(ea.mem.seg, ea.mem.off+0,
+ &lval, 4, ctxt, ops)) ||
+ (rc = read_ulong(ea.mem.seg, ea.mem.off+4,
+ &hval, 4, ctxt, ops)) )
goto done;
val = ((uint64_t)hval << 32) | (uint32_t)lval;
stub[2] = modrm & 0x38; /* movq (%eax),%mmN */
if ( ea.type == OP_MEM )
{
unsigned long lval = (uint32_t)val, hval = (uint32_t)(val >> 32);
- if ( (rc = ops->write(ea.mem.seg, ea.mem.off+0, lval, 4, ctxt)) ||
- (rc = ops->write(ea.mem.seg, ea.mem.off+4, hval, 4, ctxt)) )
+ if ( (rc = ops->write(ea.mem.seg, ea.mem.off+0, &lval, 4, ctxt)) ||
+ (rc = ops->write(ea.mem.seg, ea.mem.off+4, &hval, 4, ctxt)) )
goto done;
}
break;
/* Get actual old value. */
for ( i = 0; i < (op_bytes/sizeof(long)); i++ )
- if ( (rc = ops->read(ea.mem.seg, ea.mem.off + i*sizeof(long),
- &old[i], sizeof(long), ctxt)) != 0 )
+ if ( (rc = read_ulong(ea.mem.seg, ea.mem.off + i*sizeof(long),
+ &old[i], sizeof(long), ctxt, ops)) != 0 )
goto done;
/* Get expected and proposed values. */