        if ( data_is_addr || dir == IOREQ_WRITE )
            return X86EMUL_UNHANDLEABLE;
        goto finish_access;
-    case HVMIO_dispatched:
-        /* May have to wait for previous cycle of a multi-write to complete. */
-        if ( is_mmio && !data_is_addr && (dir == IOREQ_WRITE) &&
-             (addr == (vio->mmio_large_write_pa +
-                       vio->mmio_large_write_bytes)) )
-            return X86EMUL_RETRY;
-        /* fallthrough */
    default:
        return X86EMUL_UNHANDLEABLE;
    }
-    vio->io_state = (data_is_addr || dir == IOREQ_WRITE) ?
-        HVMIO_dispatched : HVMIO_awaiting_completion;
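+    /*
+     * Record the direction and the data_is_addr flag of the request so that
+     * hvm_vcpu_io_need_completion() can later tell whether the returned data
+     * still has to be handed back to the emulator.
+     */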
+    vio->io_state = HVMIO_awaiting_completion;
    vio->io_size = size;
+    vio->io_dir = dir;
+    vio->io_data_is_addr = data_is_addr;

    if ( dir == IOREQ_WRITE )
    {
{
    struct vcpu *curr = current;
    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
-    enum hvm_io_state io_state;

    p->state = STATE_IOREQ_NONE;

-    io_state = vio->io_state;
-    vio->io_state = HVMIO_none;
-
-    switch ( io_state )
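+    /*
+     * Only a read whose result the emulator is still waiting for needs the
+     * returned data; any other request is simply done at this point.
+     */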
+    if ( hvm_vcpu_io_need_completion(vio) )
    {
-    case HVMIO_awaiting_completion:
        vio->io_state = HVMIO_completed;
        vio->io_data = p->data;
-        break;
-    default:
-        break;
    }
+    else
+        vio->io_state = HVMIO_none;
    msix_write_completion(curr);
    vcpu_end_shutdown_deferral(curr);
    rc = hvm_emulate_one(&ctxt);

-    if ( rc != X86EMUL_RETRY )
-        vio->io_state = HVMIO_none;
-    if ( vio->io_state == HVMIO_awaiting_completion || vio->mmio_retry )
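+    /*
+     * Come back through the MMIO completion path if the emulation is still
+     * waiting for data or has to be retried.
+     */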
+    if ( hvm_vcpu_io_need_completion(vio) || vio->mmio_retry )
        vio->io_completion = HVMIO_mmio_completion;
    else
        vio->mmio_access = (struct npfec){};
    rc = hvmemul_do_pio_buffer(port, size, dir, &data);
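+    /*
+     * A read that could not complete synchronously is finished off via the
+     * HVMIO_pio_completion path once its result is available.
+     */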
+    if ( hvm_vcpu_io_need_completion(vio) )
+        vio->io_completion = HVMIO_pio_completion;
+
    switch ( rc )
    {
    case X86EMUL_OKAY:
        }
        break;
    case X86EMUL_RETRY:
-        if ( vio->io_state != HVMIO_awaiting_completion )
+        /* We should not advance RIP/EIP if the domain is shutting down */
+        if ( curr->domain->is_shutting_down )
            return 0;
-        /* Completion in hvm_io_assist() with no re-emulation required. */
-        ASSERT(dir == IOREQ_READ);
-        vio->io_completion = HVMIO_pio_completion;
+
        break;
    default:
        gdprintk(XENLOG_ERR, "Weird HVM ioemulation status %d.\n", rc);
    rc = hvm_emulate_one(hvmemul_ctxt);

-    if ( vio->io_state == HVMIO_awaiting_completion || vio->mmio_retry )
+    if ( hvm_vcpu_io_need_completion(vio) || vio->mmio_retry )
        vio->io_completion = HVMIO_realmode_completion;

    if ( rc == X86EMUL_UNHANDLEABLE )
enum hvm_io_state {
    HVMIO_none = 0,
-    HVMIO_dispatched,
    HVMIO_awaiting_completion,
    HVMIO_completed
};
    enum hvm_io_completion io_completion;
    unsigned long io_data;
    unsigned int io_size;
+    uint8_t io_dir;
+    uint8_t io_data_is_addr;
    /*
     * HVM emulation:
    const struct g2m_ioport *g2m_ioport;
};
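+/*
+ * The emulator still has to consume the result of a request only if it is a
+ * read whose data is not itself an address and which has not completed yet.
+ */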
+static inline bool_t hvm_vcpu_io_need_completion(const struct hvm_vcpu_io *vio)
+{
+    return (vio->io_state == HVMIO_awaiting_completion) &&
+           !vio->io_data_is_addr &&
+           (vio->io_dir == IOREQ_READ);
+}
+
#define VMCX_EADDR (~0ULL)
struct nestedvcpu {
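
For illustration only (not part of the patch): a minimal, self-contained sketch of the predicate that the new hvm_vcpu_io_need_completion() helper encodes, using stand-in definitions in place of the Xen headers. Only a read whose data is not itself an address, and whose result the emulator is still waiting for, needs to be completed.

#include <stdio.h>

/* Stand-ins for the Xen definitions referenced by the hunks above. */
enum hvm_io_state { HVMIO_none, HVMIO_awaiting_completion, HVMIO_completed };
#define IOREQ_WRITE 0
#define IOREQ_READ  1

struct hvm_vcpu_io {
    enum hvm_io_state io_state;
    unsigned char io_dir;          /* IOREQ_READ or IOREQ_WRITE */
    unsigned char io_data_is_addr; /* data field holds a guest address */
};

/* Same condition as the patch's hvm_vcpu_io_need_completion(). */
static int need_completion(const struct hvm_vcpu_io *vio)
{
    return vio->io_state == HVMIO_awaiting_completion &&
           !vio->io_data_is_addr &&
           vio->io_dir == IOREQ_READ;
}

int main(void)
{
    struct hvm_vcpu_io pending_read  = { HVMIO_awaiting_completion, IOREQ_READ, 0 };
    struct hvm_vcpu_io pending_write = { HVMIO_awaiting_completion, IOREQ_WRITE, 0 };

    printf("pending read  needs completion: %d\n", need_completion(&pending_read));  /* 1 */
    printf("pending write needs completion: %d\n", need_completion(&pending_write)); /* 0 */
    return 0;
}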