return X86EMUL_UNHANDLEABLE;
}
- if ( hvm_io_pending(curr) )
- {
- gdprintk(XENLOG_WARNING, "WARNING: io already pending?\n");
- return X86EMUL_UNHANDLEABLE;
- }
-
vio->io_state = (data_is_addr || dir == IOREQ_WRITE) ?
HVMIO_dispatched : HVMIO_awaiting_completion;
vio->io_size = size;
}
else
{
- rc = X86EMUL_RETRY;
- if ( !hvm_send_assist_req(s, &p) )
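+ /*
+  * If the request is not in flight (rc != X86EMUL_RETRY) the I/O
+  * state can be reset immediately.
+  */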
+ rc = hvm_send_assist_req(s, &p);
+ if ( rc != X86EMUL_RETRY )
vio->io_state = HVMIO_none;
else if ( data_is_addr || dir == IOREQ_WRITE )
rc = X86EMUL_OKAY;
return 1;
}
-bool_t hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
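+ /*
+  * Send the ioreq described by 'proto_p' to the emulator bound to
+  * ioreq server 's'.  Returns X86EMUL_RETRY if the request was
+  * dispatched to the device model, X86EMUL_OKAY if it was silently
+  * binned because the domain is shutting down, or
+  * X86EMUL_UNHANDLEABLE if no usable ioreq slot exists for the
+  * current vcpu.
+  */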
+int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
{
struct vcpu *curr = current;
struct domain *d = curr->domain;
ASSERT(s);
if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
- return 0; /* implicitly bins the i/o operation */
+ return X86EMUL_OKAY; /* implicitly bins the i/o operation */
list_for_each_entry ( sv,
&s->ioreq_vcpu_list,
{
gprintk(XENLOG_ERR, "device model set bad IO state %d\n",
p->state);
- goto crash;
+ break;
}
if ( unlikely(p->vp_eport != port) )
{
gprintk(XENLOG_ERR, "device model set bad event channel %d\n",
p->vp_eport);
- goto crash;
+ break;
}
proto_p->state = STATE_IOREQ_NONE;
*/
p->state = STATE_IOREQ_READY;
notify_via_xen_event_channel(d, port);
- break;
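+ /*
+  * The request is now visible to the device model; the emulation
+  * must be retried once the device model has completed it.
+  */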
+ return X86EMUL_RETRY;
}
}
- return 1;
-
- crash:
- domain_crash(d);
- return 0;
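+ /*
+  * Either no ioreq slot was found for this vcpu or its state was
+  * bad; report the I/O as unhandleable.
+  */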
+ return X86EMUL_UNHANDLEABLE;
}
void hvm_complete_assist_req(ioreq_t *p)
struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
ioreq_t *p);
-bool_t hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *p);
+int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *p);
void hvm_broadcast_assist_req(ioreq_t *p);
void hvm_complete_assist_req(ioreq_t *p);