return d->arch.hvm_domain.default_ioreq_server;
}
-int hvm_buffered_io_send(ioreq_t *p)
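+/*
+ * Queue an ioreq on the buffered ring of the given server. Returns
+ * X86EMUL_OKAY once the request is queued, or X86EMUL_UNHANDLEABLE if
+ * it cannot be buffered.
+ */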
+static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
{
struct domain *d = current->domain;
- struct hvm_ioreq_server *s = hvm_select_ioreq_server(d, p);
struct hvm_ioreq_page *iorp;
buffered_iopage_t *pg;
buf_ioreq_t bp = { .data = p->data,
/* Ensure buffered_iopage fits in a page */
BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
- if ( !s )
- return 0;
-
iorp = &s->bufioreq;
pg = iorp->va;
if ( !pg )
- return 0;
+ return X86EMUL_UNHANDLEABLE;
/*
- * Return 0 for the cases we can't deal with:
+ * Return X86EMUL_UNHANDLEABLE for the cases we can't deal with:
break;
default:
gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
- return 0;
+ return X86EMUL_UNHANDLEABLE;
}
spin_lock(&s->bufioreq_lock);
{
/* The queue is full: send the iopacket through the normal path. */
spin_unlock(&s->bufioreq_lock);
- return 0;
+ return X86EMUL_UNHANDLEABLE;
}
pg->buf_ioreq[pg->ptrs.write_pointer % IOREQ_BUFFER_SLOT_NUM] = bp;
notify_via_xen_event_channel(d, s->bufioreq_evtchn);
spin_unlock(&s->bufioreq_lock);
- return 1;
+ return X86EMUL_OKAY;
}
-int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
+int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
+ bool_t buffered)
{
struct vcpu *curr = current;
struct domain *d = curr->domain;
struct hvm_ioreq_vcpu *sv;
ASSERT(s);
+
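+ /* Buffered requests go straight to the server's buffered ring. */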
+ if ( buffered )
+ return hvm_send_buffered_ioreq(s, proto_p);
+
if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
return X86EMUL_RETRY;
return X86EMUL_UNHANDLEABLE;
}
-void hvm_broadcast_assist_req(ioreq_t *p)
+unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered)
{
struct domain *d = current->domain;
struct hvm_ioreq_server *s;
+ unsigned int failed = 0;
- ASSERT(p->type == IOREQ_TYPE_INVALIDATE);
+ ASSERT((p->type == IOREQ_TYPE_INVALIDATE) ||
+ (p->type == IOREQ_TYPE_TIMEOFFSET));
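+ /* Count the servers that could not handle the request. */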
list_for_each_entry ( s,
&d->arch.hvm_domain.ioreq_server.list,
list_entry )
- (void) hvm_send_assist_req(s, p);
+ if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
+ failed++;
+
+ return failed;
}
void hvm_hlt(unsigned long rflags)
if ( timeoff == 0 )
return;
- if ( !hvm_buffered_io_send(&p) )
- printk("Unsuccessful timeoffset update\n");
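+ /* Broadcast the new timeoffset to every registered ioreq server. */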
+ if ( hvm_broadcast_ioreq(&p, 1) != 0 )
+ gprintk(XENLOG_ERR, "Unsuccessful timeoffset update\n");
}
/* Ask ioemu mapcache to invalidate mappings. */
.data = ~0UL, /* flush all */
};
- hvm_broadcast_assist_req(&p);
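+ /* Map-cache invalidations are sent via the synchronous (non-buffered) path. */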
+ if ( hvm_broadcast_ioreq(&p, 0) != 0 )
+ gprintk(XENLOG_ERR, "Unsuccessful map-cache invalidate\n");
}
int handle_mmio(void)
.dir = IOREQ_WRITE,
.data = data,
};
+ struct hvm_ioreq_server *srv;
if ( !s->cache )
goto done;
}
done:
- if ( hvm_buffered_io_send(&p) )
- return X86EMUL_OKAY;
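+ /* Pick the ioreq server for this write and post it via its buffered channel. */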
+ srv = hvm_select_ioreq_server(current->domain, &p);
+ if ( !srv )
+ return X86EMUL_UNHANDLEABLE;
- return X86EMUL_UNHANDLEABLE;
+ return hvm_send_ioreq(srv, &p, 1);
}
static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
ioreq_t *p);
-int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *p);
-void hvm_broadcast_assist_req(ioreq_t *p);
+int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *p, bool_t buffered);
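+/*
+ * Broadcast an ioreq to every registered server; returns the number of
+ * servers that failed to handle it (0 on complete success).
+ */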
+unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered);
void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);