if ( p->state != STATE_IORESP_READY )
{
gdprintk(XENLOG_ERR, "Unexpected HVM iorequest state %d.\n", p->state);
- domain_crash_synchronous();
+ domain_crash(v->domain);
+ goto out;
}
rmb(); /* see IORESP_READY /then/ read contents of ioreq */
p->state = STATE_IOREQ_NONE;
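+ /* Give any registered per-vcpu completion hook first shot at the
+  * response; if it consumes it, skip the default completion work below. */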
+ if ( v->arch.hvm_vcpu.io_complete && v->arch.hvm_vcpu.io_complete() )
+ goto out;
+
switch ( p->type )
{
case IOREQ_TYPE_INVALIDATE:
unsigned long *val,
struct x86_emulate_ctxt *ctxt)
{
- return X86EMUL_UNHANDLEABLE;
+ struct vcpu *curr = current;
+
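+ /* Refuse to start a new read while a previous request is still in flight. */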
+ if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
+ return X86EMUL_UNHANDLEABLE;
+
+ if ( !curr->arch.hvm_vmx.real_mode_io_completed )
+ {
+ curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
+ send_pio_req(port, 1, bytes, 0, IOREQ_READ, 0, 0);
+ }
+
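+ /* No response yet: abandon this emulation pass; the instruction will be
+  * retried once vmx_realmode_io_complete() has latched the data. */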
+ if ( !curr->arch.hvm_vmx.real_mode_io_completed )
+ return X86EMUL_UNHANDLEABLE;
+
+ *val = curr->arch.hvm_vmx.real_mode_io_data;
+ curr->arch.hvm_vmx.real_mode_io_completed = 0;
+
+ return X86EMUL_OKAY;
}
static int realmode_write_io(
unsigned long val,
struct x86_emulate_ctxt *ctxt)
{
- return X86EMUL_UNHANDLEABLE;
+ struct vcpu *curr = current;
+
+ if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
+ return X86EMUL_UNHANDLEABLE;
+
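+ /* Issue the write; there is no data to collect, so emulation of this
+  * instruction can complete immediately. */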
+ curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
+ send_pio_req(port, 1, bytes, val, IOREQ_WRITE, 0, 0);
+
+ return X86EMUL_OKAY;
}
static int
rm_ctxt.insn_buf[2], rm_ctxt.insn_buf[3],
rm_ctxt.insn_buf[4], rm_ctxt.insn_buf[5]);
- if ( x86_emulate(&rm_ctxt.ctxt, &realmode_emulator_ops) )
- {
+ rc = x86_emulate(&rm_ctxt.ctxt, &realmode_emulator_ops);
+
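+ /* Emulation is blocked on an in-flight I/O request: leave the loop and
+  * resume once the device model has responded. */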
+ if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
+ {
+ ioreq_t *p = &get_ioreq(curr)->vp_ioreq;
+ gdprintk(XENLOG_DEBUG, "RM I/O %d %c addr=%lx data=%lx\n",
+ p->type, p->dir ? 'R' : 'W', p->addr, p->data);
+ rc = 0;
+ break;
+ }
+
+ if ( rc )
+ {
gdprintk(XENLOG_ERR, "Emulation failed\n");
rc = -EINVAL;
break;
return rc;
}
+
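+ /*
+  * io_complete hook for real-mode emulation: clears the in-flight flag
+  * and, for reads, latches the returned data for the retried instruction.
+  */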
+int vmx_realmode_io_complete(void)
+{
+ struct vcpu *curr = current;
+ ioreq_t *p = &get_ioreq(curr)->vp_ioreq;
+
+ if ( !curr->arch.hvm_vmx.real_mode_io_in_progress )
+ return 0;
+
+ curr->arch.hvm_vmx.real_mode_io_in_progress = 0;
+ if ( p->dir == IOREQ_READ )
+ {
+ curr->arch.hvm_vmx.real_mode_io_completed = 1;
+ curr->arch.hvm_vmx.real_mode_io_data = p->data;
+ }
+
+ return 1;
+}
};
/* EFLAGS bit definitions. */
+#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
/* In future we will be able to generate arbitrary exceptions. */
#define generate_exception_if(p, e) fail_if(p)
-/* To be done... */
-#define mode_ring0() (0)
-#define mode_iopl() (0)
-
/* Given byte has even parity (even number of 1s)? */
static int even_parity(uint8_t v)
{
return (!!rc ^ (condition & 1));
}
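+/* Current privilege level: 3 in virtual-8086 mode, otherwise SS.DPL. */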
+static int
+get_cpl(
+ struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops)
+{
+ struct segment_register reg;
+
+ if ( ctxt->regs->eflags & EFLG_VM )
+ return 3;
+
+ if ( (ops->read_segment == NULL) ||
+ ops->read_segment(x86_seg_ss, &reg, ctxt) )
+ return -1;
+
+ return reg.attr.fields.dpl;
+}
+
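+/* Does the current privilege level permit I/O (CPL <= IOPL, EFLAGS[13:12])? */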
+static int
+_mode_iopl(
+ struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops)
+{
+ int cpl = get_cpl(ctxt, ops);
+ return ((cpl >= 0) && (cpl <= ((ctxt->regs->eflags >> 12) & 3)));
+}
+
+#define mode_ring0() (get_cpl(ctxt, ops) == 0)
+#define mode_iopl() _mode_iopl(ctxt, ops)
+
static int
in_realmode(
struct x86_emulate_ctxt *ctxt,