req = &(shared_page->vcpu_iodata[vcpu].vp_ioreq);
- if (req->state == STATE_IOREQ_READY) {
- req->state = STATE_IOREQ_INPROCESS;
- rmb();
- return req;
+ if (req->state != STATE_IOREQ_READY) {
+ fprintf(logfile, "I/O request not ready: "
+ "%x, ptr: %x, port: %"PRIx64", "
+ "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
+ req->state, req->data_is_ptr, req->addr,
+ req->data, req->count, req->size);
+ return NULL;
}
- fprintf(logfile, "False I/O request ... in-service already: "
- "%x, ptr: %x, port: %"PRIx64", "
- "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
- req->state, req->data_is_ptr, req->addr,
- req->data, req->count, req->size);
- return NULL;
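+ /*
+ * Pairs with the (implicit) write barrier Xen issues between filling
+ * in the ioreq contents and setting STATE_IOREQ_READY, so a request
+ * observed as READY here is fully populated.
+ */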
+ rmb(); /* see IOREQ_READY /then/ read contents of ioreq */
+
+ req->state = STATE_IOREQ_INPROCESS;
+ return req;
}
// Use poll to get the port notification.
if (req) {
__handle_ioreq(env, req);
- /* No state change if state = STATE_IORESP_HOOK */
- if (req->state == STATE_IOREQ_INPROCESS) {
- req->state = STATE_IORESP_READY;
- xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]);
- } else
+ if (req->state != STATE_IOREQ_INPROCESS) {
+ fprintf(logfile, "Badness in I/O request ... not in service?!: "
+ "%x, ptr: %x, port: %"PRIx64", "
+ "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
+ req->state, req->data_is_ptr, req->addr,
+ req->data, req->count, req->size);
destroy_hvm_domain();
+ return;
+ }
+
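+ /*
+ * Ensure the response data written by __handle_ioreq() is visible
+ * before Xen can observe STATE_IORESP_READY.
+ */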
+ wmb(); /* Update ioreq contents /then/ update state. */
+ req->state = STATE_IORESP_READY;
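+ /* Wake the vcpu that is blocked in Xen awaiting this response. */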
+ xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]);
}
}
ioreq_t *p;
p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
- if ( unlikely(p->state != STATE_IOREQ_NONE) ) {
- /* This indicates a bug in the device model. Crash the
- domain. */
- printk("Device model set bad IO state %d.\n", p->state);
+ if ( unlikely(p->state != STATE_IOREQ_NONE) )
+ {
+ /* This indicates a bug in the device model. Crash the domain. */
+ gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
domain_crash(v->domain);
return;
}
prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);
+
+ /*
+ * The following happens /after/ blocking and setting up the ioreq contents.
+ * prepare_wait_on_xen_event_channel() is an implicit barrier.
+ */
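+ /* Pairs with the rmb() the device model issues on seeing IOREQ_READY. */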
p->state = STATE_IOREQ_READY;
notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
}
p->data = shadow_gva_to_gpa(current, value);
else
p->data = value; /* guest VA == guest PA */
- } else if ( dir == IOREQ_WRITE )
+ }
+ else if ( dir == IOREQ_WRITE )
p->data = value;
- if ( hvm_portio_intercept(p) ) {
+ if ( hvm_portio_intercept(p) )
+ {
p->state = STATE_IORESP_READY;
hvm_io_assist(v);
return;
p->io_count++;
- if (value_is_ptr) {
- if (hvm_paging_enabled(v))
+ if ( value_is_ptr )
+ {
+ if ( hvm_paging_enabled(v) )
p->data = shadow_gva_to_gpa(v, value);
else
p->data = value; /* guest VA == guest PA */
- } else
+ }
+ else
p->data = value;
- if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) ) {
+ if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) )
+ {
p->state = STATE_IORESP_READY;
hvm_io_assist(v);
return;