struct hvm_emulate_ctxt ctxt;
struct vcpu *curr = current;
int rc;
- unsigned long data, reps = 1;
- if ( curr->arch.hvm_vcpu.io_size == 0 ) {
- hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
- rc = hvm_emulate_one(&ctxt);
- } else {
- if ( curr->arch.hvm_vcpu.io_dir == 0 )
- data = guest_cpu_user_regs()->eax;
- rc = hvmemul_do_io(0, curr->arch.hvm_vcpu.io_port, &reps,
- curr->arch.hvm_vcpu.io_size, 0,
- curr->arch.hvm_vcpu.io_dir, 0, &data);
- if ( curr->arch.hvm_vcpu.io_dir == 1 && rc == X86EMUL_OKAY ) {
- memcpy(&(guest_cpu_user_regs()->eax),
- &data, curr->arch.hvm_vcpu.io_size);
- }
- }
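+    /* Decoded port I/O is routed to handle_pio(); here we fully emulate. */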
+ hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
+
+ rc = hvm_emulate_one(&ctxt);
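+    /* If the access is still in flight, hvm_io_assist() must re-enter us. */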
if ( curr->arch.hvm_vcpu.io_state == HVMIO_awaiting_completion )
curr->arch.hvm_vcpu.io_state = HVMIO_handle_mmio_awaiting_completion;
switch ( rc )
{
case X86EMUL_UNHANDLEABLE:
- if ( curr->arch.hvm_vcpu.io_size == 0 )
- gdprintk(XENLOG_WARNING,
- "MMIO emulation failed @ %04x:%lx: "
- "%02x %02x %02x %02x %02x %02x\n",
- hvmemul_get_seg_reg(x86_seg_cs, &ctxt)->sel,
- ctxt.insn_buf_eip,
- ctxt.insn_buf[0], ctxt.insn_buf[1],
- ctxt.insn_buf[2], ctxt.insn_buf[3],
- ctxt.insn_buf[4], ctxt.insn_buf[5]);
- else
- gdprintk(XENLOG_WARNING,
- "I/O emulation failed: %s 0x%04x, %i bytes, data=%08lx\n",
- curr->arch.hvm_vcpu.io_dir ? "in" : "out",
- curr->arch.hvm_vcpu.io_port,
- curr->arch.hvm_vcpu.io_size, data);
+ gdprintk(XENLOG_WARNING,
+ "MMIO emulation failed @ %04x:%lx: "
+ "%02x %02x %02x %02x %02x %02x\n",
+ hvmemul_get_seg_reg(x86_seg_cs, &ctxt)->sel,
+ ctxt.insn_buf_eip,
+ ctxt.insn_buf[0], ctxt.insn_buf[1],
+ ctxt.insn_buf[2], ctxt.insn_buf[3],
+ ctxt.insn_buf[4], ctxt.insn_buf[5]);
return 0;
case X86EMUL_EXCEPTION:
if ( ctxt.exn_pending )
break;
}
- if ( curr->arch.hvm_vcpu.io_size == 0 )
- hvm_emulate_writeback(&ctxt);
- else
- curr->arch.hvm_vcpu.io_size = 0;
+ hvm_emulate_writeback(&ctxt);
- if (rc == X86EMUL_RETRY)
- return rc;
- else
- return 1;
+ return 1;
}
int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn)
return handle_mmio();
}
-int handle_mmio_decoded(uint16_t port, int size, int dir)
+int handle_pio(uint16_t port, int size, int dir)
{
- current->arch.hvm_vcpu.io_port = port;
- current->arch.hvm_vcpu.io_size = size;
- current->arch.hvm_vcpu.io_dir = dir;
- return handle_mmio();
+ struct vcpu *curr = current;
+ unsigned long data, reps = 1;
+ int rc;
+
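+    /* For OUTs, the value to write comes from the guest's %eax. */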
+ if ( dir == IOREQ_WRITE )
+ data = guest_cpu_user_regs()->eax;
+
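+    /* Single, non-string port access via the common I/O emulation path. */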
+ rc = hvmemul_do_pio(port, &reps, size, 0, dir, 0, &data);
+
+ switch ( rc )
+ {
+ case X86EMUL_OKAY:
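+        /* INs update only the low 'size' bytes of the %eax image. */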
+ if ( dir == IOREQ_READ )
+            memcpy(&guest_cpu_user_regs()->eax, &data, size);
+ break;
+ case X86EMUL_RETRY:
+ if ( curr->arch.hvm_vcpu.io_state != HVMIO_awaiting_completion )
+ return 0;
+ /* Completion in hvm_io_assist() with no re-emulation required. */
+ ASSERT(dir == IOREQ_READ);
+ curr->arch.hvm_vcpu.io_state = HVMIO_handle_pio_awaiting_completion;
+ break;
+    default:
+        gdprintk(XENLOG_ERR, "Weird HVM ioemulation status %d.\n", rc);
+        domain_crash(curr->domain);
+        break;
+ }
+
+ return 1;
}
void hvm_io_assist(void)
io_state = curr->arch.hvm_vcpu.io_state;
curr->arch.hvm_vcpu.io_state = HVMIO_none;
- if ( (io_state == HVMIO_awaiting_completion) ||
- (io_state == HVMIO_handle_mmio_awaiting_completion) )
+ switch ( io_state )
{
+ case HVMIO_awaiting_completion:
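+        /* Stash the result: the emulator will retry and consume it. */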
+ curr->arch.hvm_vcpu.io_state = HVMIO_completed;
+ curr->arch.hvm_vcpu.io_data = p->data;
+ break;
+ case HVMIO_handle_mmio_awaiting_completion:
curr->arch.hvm_vcpu.io_state = HVMIO_completed;
curr->arch.hvm_vcpu.io_data = p->data;
- if ( io_state == HVMIO_handle_mmio_awaiting_completion )
- (void)handle_mmio();
+ (void)handle_mmio();
+ break;
+ case HVMIO_handle_pio_awaiting_completion:
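+        /* No re-emulation needed: write the result straight into %eax. */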
+ memcpy(&guest_cpu_user_regs()->eax,
+ &p->data, curr->arch.hvm_vcpu.io_size);
+ break;
+ default:
+ break;
}
if ( p->state == STATE_IOREQ_NONE )
__update_guest_eip(regs, inst_len);
}
-static int svm_vmexit_do_io(struct vmcb_struct *vmcb,
- struct cpu_user_regs *regs)
-{
- uint16_t port;
- int bytes, dir;
- int rc;
-
- port = (vmcb->exitinfo1 >> 16) & 0xFFFF;
- bytes = ((vmcb->exitinfo1 >> 4) & 0x07);
- dir = (vmcb->exitinfo1 & 1) ? IOREQ_READ : IOREQ_WRITE;
-
- rc = handle_mmio_decoded(port, bytes, dir);
- if ( rc != X86EMUL_RETRY )
- __update_guest_eip(regs, vmcb->exitinfo2 - vmcb->rip);
- return rc;
-}
-
static void svm_invlpg_intercept(unsigned long vaddr)
{
struct vcpu *curr = current;
break;
case VMEXIT_IOIO:
- if ( ( vmcb->exitinfo1 & ( 1 << 2 ) ) == 0 ) {
- if ( !svm_vmexit_do_io(vmcb, regs) )
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ if ( (vmcb->exitinfo1 & (1u<<2)) == 0 )
+ {
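+            /* EXITINFO1: bit 0 = direction (1 = IN), bits 6:4 = access
+             * size (reads as 1, 2 or 4 bytes), bits 31:16 = port number. */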
+ uint16_t port = (vmcb->exitinfo1 >> 16) & 0xFFFF;
+            int bytes = (vmcb->exitinfo1 >> 4) & 0x07;
+ int dir = (vmcb->exitinfo1 & 1) ? IOREQ_READ : IOREQ_WRITE;
+ if ( handle_pio(port, bytes, dir) )
+ __update_guest_eip(regs, vmcb->exitinfo2 - vmcb->rip);
break;
}
/* fallthrough to emulation if a string instruction */
return 1;
}
-static int vmx_io_intercept(unsigned long exit_qualification,
- struct cpu_user_regs *regs)
-{
- uint16_t port;
- int bytes, dir;
- int rc;
- int inst_len;
-
- port = (exit_qualification >> 16) & 0xFFFF;
- bytes = (exit_qualification & 0x07) + 1;
- dir = (exit_qualification & 0x08) ? IOREQ_READ : IOREQ_WRITE;
-
- inst_len = __get_instruction_length();
- rc = handle_mmio_decoded(port, bytes, dir);
- if ( rc != X86EMUL_RETRY)
- __update_guest_eip(inst_len);
- return rc;
-}
-
static const struct lbr_info {
u32 base, count;
} p4_lbr[] = {
case EXIT_REASON_IO_INSTRUCTION:
exit_qualification = __vmread(EXIT_QUALIFICATION);
- if (exit_qualification & 0x10) {
+ if ( exit_qualification & 0x10 )
+ {
if ( !handle_mmio() )
vmx_inject_hw_exception(TRAP_gp_fault, 0);
- } else {
- if ( !vmx_io_intercept(exit_qualification, regs) )
- vmx_inject_hw_exception(TRAP_gp_fault, 0);
+ }
+ else
+ {
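+        /* Exit qualification: bits 2:0 = access size - 1, bit 3 =
+         * direction (1 = IN), bits 31:16 = port number. */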
+ uint16_t port = (exit_qualification >> 16) & 0xFFFF;
+ int bytes = (exit_qualification & 0x07) + 1;
+ int dir = (exit_qualification & 0x08) ? IOREQ_READ : IOREQ_WRITE;
+ inst_len = __get_instruction_length();
+ if ( handle_pio(port, bytes, dir) )
+ __update_guest_eip(inst_len);
}
break;