ioreq-server: handle the lack of a default emulator properly
authorPaul Durrant <paul.durrant@citrix.com>
Wed, 1 Oct 2014 09:37:06 +0000 (11:37 +0200)
committerJan Beulich <jbeulich@suse.com>
Wed, 1 Oct 2014 09:37:06 +0000 (11:37 +0200)
I started porting QEMU over to use the new ioreq server API and hit a
problem with PCI bus enumeration. Because, with my patches, QEMU only
registers to handle config space accesses for the PCI devices it
implements, all other attempts by the guest to access 0xcfc went
nowhere, and this caused the vcpu to wedge because nothing was
completing the I/O.

This patch introduces an I/O completion handler into the hypervisor for
the case where no ioreq server matches a particular request. Read
requests are completed with 0xf's in the data buffer; writes and all
other I/O request types are ignored.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
xen/arch/x86/hvm/hvm.c

index 7e2d5d106e7576bb9e47a8fd48e06b3f1e75d79f..43471de71dc6da36d5083e4a629a81b7400b210a 100644 (file)
@@ -2332,8 +2332,7 @@ static struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
     if ( list_empty(&d->arch.hvm_domain.ioreq_server.list) )
         return NULL;
 
-    if ( list_is_singular(&d->arch.hvm_domain.ioreq_server.list) ||
-         (p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO) )
+    if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
         return d->arch.hvm_domain.default_ioreq_server;
 
     cf8 = d->arch.hvm_domain.pci_cf8;
@@ -2564,12 +2563,42 @@ bool_t hvm_send_assist_req_to_ioreq_server(struct hvm_ioreq_server *s,
     return 0;
 }
 
+static bool_t hvm_complete_assist_req(ioreq_t *p)
+{
+    switch ( p->type )
+    {
+    case IOREQ_TYPE_COPY:
+    case IOREQ_TYPE_PIO:
+        if ( p->dir == IOREQ_READ )
+        {
+            if ( !p->data_is_ptr )
+                p->data = ~0ul;
+            else
+            {
+                int i, step = p->df ? -p->size : p->size;
+                uint32_t data = ~0;
+
+                for ( i = 0; i < p->count; i++ )
+                    hvm_copy_to_guest_phys(p->data + step * i, &data,
+                                           p->size);
+            }
+        }
+        /* FALLTHRU */
+    default:
+        p->state = STATE_IORESP_READY;
+        hvm_io_assist(p);
+        break;
+    }
+
+    return 1;
+}
+
 bool_t hvm_send_assist_req(ioreq_t *p)
 {
     struct hvm_ioreq_server *s = hvm_select_ioreq_server(current->domain, p);
 
     if ( !s )
-        return 0;
+        return hvm_complete_assist_req(p);
 
     return hvm_send_assist_req_to_ioreq_server(s, p);
 }