struct vcpu *curr = current;
struct hvm_vcpu_io *vio;
ioreq_t *p = get_ioreq(curr);
+ ioreq_t _ioreq;
unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
p2m_type_t p2mt;
struct page_info *ram_page;
int rc;
+ bool_t has_dm = 1;
+
+ /*
+ * Domains without a backing DM don't have an ioreq page. Just
+ * point to a struct on the stack, initialising the state as needed.
+ */
+ if ( !p )
+ {
+ has_dm = 0;
+ p = &_ioreq;
+ p->state = STATE_IOREQ_NONE;
+ }
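For reference, the same fallback factored into a hypothetical helper (get_ioreq_or_local is illustrative only, not part of this patch): when no ioreq page exists, hand back a caller-supplied local request with its state initialised, so the rest of the I/O path can dereference the pointer unconditionally.

    static ioreq_t *get_ioreq_or_local(struct vcpu *v, ioreq_t *local,
                                       bool_t *has_dm)
    {
        ioreq_t *p = get_ioreq(v);

        if ( p )
        {
            /* Shared ioreq page present: a device model backs this domain. */
            *has_dm = 1;
            return p;
        }

        /* No backing DM: fall back to the struct on the caller's stack. */
        *has_dm = 0;
        local->state = STATE_IOREQ_NONE;
        return local;
    }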
/* Check for paged out page */
ram_page = get_page_from_gfn(curr->domain, ram_gfn, &p2mt, P2M_UNSHARE);
p->state = STATE_IORESP_READY;
if ( !vio->mmio_retry )
{
- hvm_io_assist();
+ hvm_io_assist(p);
vio->io_state = HVMIO_none;
}
else
vio->io_state = HVMIO_handle_mmio_awaiting_completion;
break;
case X86EMUL_UNHANDLEABLE:
- rc = X86EMUL_RETRY;
- if ( !hvm_send_assist_req(curr) )
- vio->io_state = HVMIO_none;
- else if ( p_data == NULL )
+ /* If there is no backing DM, just ignore accesses */
+ if ( !has_dm )
+ {
rc = X86EMUL_OKAY;
+ vio->io_state = HVMIO_none;
+ }
+ else
+ {
+ rc = X86EMUL_RETRY;
+ if ( !hvm_send_assist_req(curr) )
+ vio->io_state = HVMIO_none;
+ else if ( p_data == NULL )
+ rc = X86EMUL_OKAY;
+ }
break;
default:
BUG();
check_wakeup_from_wait();
/* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
- p = get_ioreq(v);
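+ /* No ioreq page means no backing DM: skip straight to trap injection. */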
+ if ( !(p = get_ioreq(v)) )
+ goto check_inject_trap;
+
while ( p->state != STATE_IOREQ_NONE )
{
switch ( p->state )
{
case STATE_IORESP_READY: /* IORESP_READY -> NONE */
- hvm_io_assist();
+ hvm_io_assist(p);
break;
case STATE_IOREQ_READY: /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
case STATE_IOREQ_INPROCESS:
}
}
+ check_inject_trap:
/* Inject pending hw/sw trap */
if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
{
if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
return 0; /* implicitly bins the i/o operation */
- p = get_ioreq(v);
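+ /* No ioreq page (no backing DM): there is nowhere to send the request. */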
+ if ( !(p = get_ioreq(v)) )
+ return 0;
+
if ( unlikely(p->state != STATE_IOREQ_NONE) )
{
/* This indicates a bug in the device model. Crash the domain. */
struct vcpu *v = current;
ioreq_t *p = get_ioreq(v);
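+ /* No ioreq page (no backing DM): no invalidate request to send. */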
+ if ( !p )
+ return;
+
if ( p->state != STATE_IOREQ_NONE )
{
gdprintk(XENLOG_ERR, "WARNING: send invalidate req with something "
return 1;
}
-void hvm_io_assist(void)
+void hvm_io_assist(ioreq_t *p)
{
struct vcpu *curr = current;
struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
- ioreq_t *p = get_ioreq(curr);
enum hvm_io_state io_state;
rmb(); /* see IORESP_READY /then/ read contents of ioreq */
int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn);
int handle_pio(uint16_t port, unsigned int size, int dir);
void hvm_interrupt_post(struct vcpu *v, int vector, int type);
-void hvm_io_assist(void);
+void hvm_io_assist(ioreq_t *p);
void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
union vioapic_redir_entry *ent);
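Passing the request explicitly means hvm_io_assist() no longer looks it up via get_ioreq(), so it can complete the stack-allocated ioreq used when there is no device model as well as the shared-page one. A minimal sketch of the resulting call pattern, mirroring the resume-path hunk above (surrounding code elided):

    ioreq_t *p = get_ioreq(v);

    if ( p != NULL && p->state == STATE_IORESP_READY )
        hvm_io_assist(p);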
struct domain *d = v->domain;
shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
- ASSERT(d->arch.hvm_domain.ioreq.va != NULL);
- return &p->vcpu_ioreq[v->vcpu_id];
+ return p ? &p->vcpu_ioreq[v->vcpu_id] : NULL;
}
#define HVM_DELIVER_NO_ERROR_CODE -1
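With the ASSERT removed, get_ioreq() now returns NULL when the domain has no ioreq page, i.e. no backing device model, and every caller is expected to check for that before dereferencing. A minimal sketch of the new caller contract (the surrounding function is hypothetical):

    ioreq_t *p = get_ioreq(v);

    if ( !p )
        return;    /* no ioreq page: no backing device model */

    /* ... p is safe to dereference from here on ... */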