        rc = hvm_send_ioreq(s, &p, 0);
        if ( rc != X86EMUL_RETRY || currd->is_shutting_down )
            vio->io_req.state = STATE_IOREQ_NONE;
-       /*
-        * This effectively is !hvm_vcpu_io_need_completion(vio), slightly
-        * optimized and using local variables we have available.
-        */
-       else if ( data_is_addr || (!is_mmio && dir == IOREQ_WRITE) )
+       else if ( !hvm_ioreq_needs_completion(&vio->io_req) )
            rc = X86EMUL_OKAY;
    }
    break;
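
/*
 * A minimal standalone sketch, using stub types and constants rather than
 * the real Xen definitions, of why the open-coded test above and
 * !hvm_ioreq_needs_completion() agree in this branch: the request was just
 * sent, so its state is STATE_IOREQ_READY, p.data_is_ptr was filled from
 * data_is_addr, and p.type is IOREQ_TYPE_PIO exactly when !is_mmio.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

enum { IOREQ_READ = 1, IOREQ_WRITE = 0 };          /* stub directions */
enum { IOREQ_TYPE_PIO, IOREQ_TYPE_COPY };          /* stub request types */
enum { STATE_IOREQ_NONE, STATE_IOREQ_READY };      /* stub states */

typedef struct {
    uint8_t state, data_is_ptr, type, dir;         /* stub layout */
} ioreq_t;

/* Re-statement of the new helper over the stub type. */
static bool hvm_ioreq_needs_completion(const ioreq_t *ioreq)
{
    return ioreq->state == STATE_IOREQ_READY &&
           !ioreq->data_is_ptr &&
           (ioreq->type != IOREQ_TYPE_PIO || ioreq->dir != IOREQ_WRITE);
}

int main(void)
{
    /* Exhaust all combinations of the three locals the old test used. */
    for ( int data_is_addr = 0; data_is_addr <= 1; data_is_addr++ )
        for ( int is_mmio = 0; is_mmio <= 1; is_mmio++ )
            for ( int dir = IOREQ_WRITE; dir <= IOREQ_READ; dir++ )
            {
                ioreq_t p = {
                    .state = STATE_IOREQ_READY,
                    .data_is_ptr = data_is_addr,
                    .type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
                    .dir = dir,
                };
                bool old = data_is_addr || (!is_mmio && dir == IOREQ_WRITE);

                assert( old == !hvm_ioreq_needs_completion(&p) );
            }
    return 0;
}
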
    if ( rc == X86EMUL_OKAY && vio->mmio_retry )
        rc = X86EMUL_RETRY;
-   if ( !hvm_vcpu_io_need_completion(vio) )
+   if ( !hvm_ioreq_needs_completion(&vio->io_req) )
    {
        vio->mmio_cache_count = 0;
        vio->mmio_insn_bytes = 0;

    rc = hvm_emulate_one(&ctxt);
-   if ( hvm_vcpu_io_need_completion(vio) )
+   if ( hvm_ioreq_needs_completion(&vio->io_req) )
        vio->io_completion = HVMIO_mmio_completion;
    else
        vio->mmio_access = (struct npfec){};

    rc = hvmemul_do_pio_buffer(port, size, dir, &data);
-   if ( hvm_vcpu_io_need_completion(vio) )
+   if ( hvm_ioreq_needs_completion(&vio->io_req) )
        vio->io_completion = HVMIO_pio_completion;
    switch ( rc )

static void hvm_io_assist(struct hvm_ioreq_vcpu *sv, uint64_t data)
{
    struct vcpu *v = sv->vcpu;
-   struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
+   ioreq_t *ioreq = &v->arch.hvm_vcpu.hvm_io.io_req;
-   if ( hvm_vcpu_io_need_completion(vio) )
+   if ( hvm_ioreq_needs_completion(ioreq) )
    {
-       vio->io_req.state = STATE_IORESP_READY;
-       vio->io_req.data = data;
+       ioreq->state = STATE_IORESP_READY;
+       ioreq->data = data;
    }
    else
-       vio->io_req.state = STATE_IOREQ_NONE;
+       ioreq->state = STATE_IOREQ_NONE;
    msix_write_completion(v);
    vcpu_end_shutdown_deferral(v);
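
/*
 * A standalone sketch of the transition hvm_io_assist() performs above,
 * with stub types standing in for the real Xen ones: a request that still
 * needs completion latches the device model's answer and becomes
 * STATE_IORESP_READY for the emulator to consume; any other request simply
 * returns to idle.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

enum { STATE_IOREQ_NONE, STATE_IOREQ_READY, STATE_IORESP_READY }; /* stubs */

typedef struct { uint8_t state; uint8_t data_is_ptr; uint64_t data; } ioreq_t;

/* Simplified stand-in for hvm_ioreq_needs_completion() (type/dir omitted). */
static bool needs_completion(const ioreq_t *ioreq)
{
    return ioreq->state == STATE_IOREQ_READY && !ioreq->data_is_ptr;
}

static void io_assist(ioreq_t *ioreq, uint64_t data)
{
    if ( needs_completion(ioreq) )
    {
        ioreq->state = STATE_IORESP_READY;  /* emulator will consume .data */
        ioreq->data = data;
    }
    else
        ioreq->state = STATE_IOREQ_NONE;    /* nothing to hand back: idle */
}

int main(void)
{
    ioreq_t mmio_read = { .state = STATE_IOREQ_READY };
    ioreq_t page_copy = { .state = STATE_IOREQ_READY, .data_is_ptr = 1 };

    io_assist(&mmio_read, 0xabcd);          /* answer travels in-band */
    io_assist(&page_copy, 0);               /* data already in guest page */

    assert( mmio_read.state == STATE_IORESP_READY && mmio_read.data == 0xabcd );
    assert( page_copy.state == STATE_IOREQ_NONE );
    return 0;
}
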
    rc = hvm_emulate_one(hvmemul_ctxt);
-   if ( hvm_vcpu_io_need_completion(vio) )
+   if ( hvm_ioreq_needs_completion(&vio->io_req) )
        vio->io_completion = HVMIO_realmode_completion;
    if ( rc == X86EMUL_UNHANDLEABLE )
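
/*
 * The MMIO, PIO and realmode call sites above share the same shape; a
 * condensed, runnable sketch of that pattern, where every name and the
 * fake emulation step are illustrative stand-ins, not the Xen code:
 */
#include <stdbool.h>
#include <stdio.h>

enum hvm_io_completion {
    HVMIO_no_completion,
    HVMIO_mmio_completion,
    HVMIO_pio_completion,
    HVMIO_realmode_completion,
};                                               /* stub completion ids */

typedef struct { bool awaiting_dm; } ioreq_t;    /* stub request */

struct hvm_vcpu_io {
    ioreq_t io_req;
    enum hvm_io_completion io_completion;
};                                               /* stub vCPU I/O state */

static bool hvm_ioreq_needs_completion(const ioreq_t *ioreq)
{
    return ioreq->awaiting_dm;                   /* simplified stand-in */
}

/* Fake step: pretend the insn had to be forwarded to the device model. */
static int emulate_one(struct hvm_vcpu_io *vio)
{
    vio->io_req.awaiting_dm = true;
    return 1 /* X86EMUL_RETRY */;
}

int main(void)
{
    struct hvm_vcpu_io vio = { 0 };
    int rc = emulate_one(&vio);

    /* The shared pattern: after one emulation step, record which
     * completion routine must run once the device model has replied. */
    if ( hvm_ioreq_needs_completion(&vio.io_req) )
        vio.io_completion = HVMIO_mmio_completion;

    printf("rc=%d, completion=%d\n", rc, vio.io_completion);
    return 0;
}
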
    const struct g2m_ioport *g2m_ioport;
};
-static inline bool hvm_vcpu_io_need_completion(const struct hvm_vcpu_io *vio)
+static inline bool hvm_ioreq_needs_completion(const ioreq_t *ioreq)
{
-   return (vio->io_req.state == STATE_IOREQ_READY) &&
-          !vio->io_req.data_is_ptr &&
-          (vio->io_req.type != IOREQ_TYPE_PIO ||
-           vio->io_req.dir != IOREQ_WRITE);
+   return ioreq->state == STATE_IOREQ_READY &&
+          !ioreq->data_is_ptr &&
+          (ioreq->type != IOREQ_TYPE_PIO || ioreq->dir != IOREQ_WRITE);
}
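
/*
 * A quick standalone exercise of the predicate, again with stub
 * definitions: only a READY request whose payload travels in-band (via
 * .data rather than a guest page through data_is_ptr) and which is not a
 * PIO write still needs a completion step.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { IOREQ_WRITE, IOREQ_READ };                  /* stub: write=0, read=1 */
enum { IOREQ_TYPE_PIO, IOREQ_TYPE_COPY };          /* stub request types */
enum { STATE_IOREQ_NONE, STATE_IOREQ_READY };      /* stub states */

typedef struct { uint8_t state, data_is_ptr, type, dir; } ioreq_t;

static bool hvm_ioreq_needs_completion(const ioreq_t *ioreq)
{
    return ioreq->state == STATE_IOREQ_READY &&
           !ioreq->data_is_ptr &&
           (ioreq->type != IOREQ_TYPE_PIO || ioreq->dir != IOREQ_WRITE);
}

int main(void)
{
    static const struct { const char *name; ioreq_t req; } cases[] = {
        { "PIO read  ", { STATE_IOREQ_READY, 0, IOREQ_TYPE_PIO,  IOREQ_READ  } },
        { "PIO write ", { STATE_IOREQ_READY, 0, IOREQ_TYPE_PIO,  IOREQ_WRITE } },
        { "MMIO read ", { STATE_IOREQ_READY, 0, IOREQ_TYPE_COPY, IOREQ_READ  } },
        { "MMIO write", { STATE_IOREQ_READY, 0, IOREQ_TYPE_COPY, IOREQ_WRITE } },
        { "page-bound", { STATE_IOREQ_READY, 1, IOREQ_TYPE_COPY, IOREQ_READ  } },
        { "idle      ", { STATE_IOREQ_NONE,  0, IOREQ_TYPE_PIO,  IOREQ_READ  } },
    };

    for ( unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++ )
        printf("%s -> %s\n", cases[i].name,
               hvm_ioreq_needs_completion(&cases[i].req) ? "needs completion"
                                                         : "done");
    return 0;
}
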
struct nestedvcpu {