x86/hvm: add support for broadcast of buffered ioreqs...
author     Paul Durrant <paul.durrant@citrix.com>
           Mon, 13 Jul 2015 09:53:18 +0000 (11:53 +0200)
committer  Jan Beulich <jbeulich@suse.com>
           Mon, 13 Jul 2015 09:53:18 +0000 (11:53 +0200)
...and make RTC timeoffset ioreqs use it.

Without this patch, RTC timeoffset updates go nowhere and Xen complains
with a (non-rate-limited) printk.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
xen/arch/x86/hvm/emulate.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/io.c
xen/arch/x86/hvm/stdvga.c
xen/include/asm-x86/hvm/hvm.h
xen/include/asm-x86/hvm/io.h
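
The patch replaces the old helpers with two new entry points. For
orientation, the resulting declarations and their conventions (a
summary distilled from the hunks below):

    /*
     * Replaces hvm_send_assist_req(). 'buffered' selects delivery via
     * the ioreq server's buffered (one-way) ring rather than the
     * synchronous per-vcpu ioreq slot. Returns an X86EMUL_* code.
     */
    int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *p,
                       bool_t buffered);

    /*
     * Replaces hvm_broadcast_assist_req(). Sends to every ioreq server
     * attached to the domain and returns the number of servers that
     * failed to accept the ioreq (0 means complete success).
     */
    unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered);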

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 01ee972e8305243273c1b9a568018a8aaa9b76de..795321c2b0b789d90c5b18b3b0474b2ffbc15b8e 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -161,7 +161,7 @@ static int hvmemul_do_io(
         }
         else
         {
-            rc = hvm_send_assist_req(s, &p);
+            rc = hvm_send_ioreq(s, &p, 0);
             if ( rc != X86EMUL_RETRY || curr->domain->is_shutting_down )
                 vio->io_req.state = STATE_IOREQ_NONE;
             else if ( data_is_addr )
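
hvmemul_do_io() passes buffered = 0: an emulated port or MMIO access
must stay synchronous, because the vcpu cannot make progress until the
device model has completed the request. An illustrative contrast of
the two delivery modes (a sketch, not part of the patch):

    /* Synchronous: the vcpu waits for the device model to respond. */
    rc = hvm_send_ioreq(s, &p, 0);  /* may return X86EMUL_RETRY */

    /* Buffered: fire-and-forget; the request is queued on the ring
     * and no completion is signalled back to the vcpu. */
    rc = hvm_send_ioreq(s, &p, 1);  /* OKAY or UNHANDLEABLE */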
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ebcf7a9af637984588bf1546d006500ad88d4101..545aa910cc2bd58b1c0217b9a5cc9fe1918a01dd 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2561,10 +2561,9 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
     return d->arch.hvm_domain.default_ioreq_server;
 }
 
-int hvm_buffered_io_send(ioreq_t *p)
+static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
 {
     struct domain *d = current->domain;
-    struct hvm_ioreq_server *s = hvm_select_ioreq_server(d, p);
     struct hvm_ioreq_page *iorp;
     buffered_iopage_t *pg;
     buf_ioreq_t bp = { .data = p->data,
@@ -2577,14 +2576,11 @@ int hvm_buffered_io_send(ioreq_t *p)
     /* Ensure buffered_iopage fits in a page */
     BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
 
-    if ( !s )
-        return 0;
-
     iorp = &s->bufioreq;
     pg = iorp->va;
 
     if ( !pg )
-        return 0;
+        return X86EMUL_UNHANDLEABLE;
 
     /*
-     * Return 0 for the cases we can't deal with:
+     * Return X86EMUL_UNHANDLEABLE for the cases we can't deal with:
@@ -2614,7 +2610,7 @@ int hvm_buffered_io_send(ioreq_t *p)
         break;
     default:
         gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
-        return 0;
+        return X86EMUL_UNHANDLEABLE;
     }
 
     spin_lock(&s->bufioreq_lock);
@@ -2624,7 +2620,7 @@ int hvm_buffered_io_send(ioreq_t *p)
     {
         /* The queue is full: send the iopacket through the normal path. */
         spin_unlock(&s->bufioreq_lock);
-        return 0;
+        return X86EMUL_UNHANDLEABLE;
     }
 
     pg->buf_ioreq[pg->ptrs.write_pointer % IOREQ_BUFFER_SLOT_NUM] = bp;
@@ -2654,16 +2650,21 @@ int hvm_buffered_io_send(ioreq_t *p)
     notify_via_xen_event_channel(d, s->bufioreq_evtchn);
     spin_unlock(&s->bufioreq_lock);
 
-    return 1;
+    return X86EMUL_OKAY;
 }
 
-int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
+int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
+                   bool_t buffered)
 {
     struct vcpu *curr = current;
     struct domain *d = curr->domain;
     struct hvm_ioreq_vcpu *sv;
 
     ASSERT(s);
+
+    if ( buffered )
+        return hvm_send_buffered_ioreq(s, proto_p);
+
     if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
         return X86EMUL_RETRY;
 
@@ -2710,17 +2711,19 @@ int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
     return X86EMUL_UNHANDLEABLE;
 }
 
-void hvm_broadcast_assist_req(ioreq_t *p)
+unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered)
 {
     struct domain *d = current->domain;
     struct hvm_ioreq_server *s;
+    unsigned int failed = 0;
 
-    ASSERT(p->type == IOREQ_TYPE_INVALIDATE);
-
     list_for_each_entry ( s,
                           &d->arch.hvm_domain.ioreq_server.list,
                           list_entry )
-        (void) hvm_send_assist_req(s, p);
+        if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
+            failed++;
+
+    return failed;
 }
 
 void hvm_hlt(unsigned long rflags)
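
The buffered ring written above is drained asynchronously by the
device model. A simplified consumer-side sketch, assuming the public
buffered_iopage layout from xen/include/public/hvm/ioreq.h (the
guest-facing view exposes plain read_pointer/write_pointer fields);
two-slot 8-byte requests and memory barriers are elided, and
process_buf_ioreq() is a hypothetical handler:

    static void drain_buffered_ioreqs(buffered_iopage_t *pg)
    {
        /* Xen advances write_pointer under bufioreq_lock; the
         * consumer owns read_pointer. */
        while ( pg->read_pointer != pg->write_pointer )
        {
            buf_ioreq_t *buf =
                &pg->buf_ioreq[pg->read_pointer % IOREQ_BUFFER_SLOT_NUM];

            process_buf_ioreq(buf); /* hypothetical handler */

            /* Publish consumption only after the slot is read. */
            pg->read_pointer++;
        }
    }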
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index bbfc31d1a00ab73f576f9b84ffac128db8916796..d3b9cae70f2f13a27eb0959a782b8d77ebf28170 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -60,8 +60,8 @@ void send_timeoffset_req(unsigned long timeoff)
     if ( timeoff == 0 )
         return;
 
-    if ( !hvm_buffered_io_send(&p) )
-        printk("Unsuccessful timeoffset update\n");
+    if ( hvm_broadcast_ioreq(&p, 1) != 0 )
+        gprintk(XENLOG_ERR, "Unsuccessful timeoffset update\n");
 }
 
 /* Ask ioemu mapcache to invalidate mappings. */
@@ -74,7 +74,8 @@ void send_invalidate_req(void)
         .data = ~0UL, /* flush all */
     };
 
-    hvm_broadcast_assist_req(&p);
+    if ( hvm_broadcast_ioreq(&p, 0) != 0 )
+        gprintk(XENLOG_ERR, "Unsuccessful map-cache invalidate\n");
 }
 
 int handle_mmio(void)
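
The timeoffset ioreq is a one-way write with no reply, which is why
buffered broadcast (buffered = 1) is suitable for it, while the
mapcache invalidation above stays synchronous (buffered = 0). For
reference, send_timeoffset_req() reads roughly as follows after this
patch (a reconstructed sketch; the ioreq initialiser is untouched by
the hunk):

    void send_timeoffset_req(unsigned long timeoff)
    {
        ioreq_t p = {
            .type = IOREQ_TYPE_TIMEOFFSET,
            .size = 8,
            .count = 1,
            .dir = IOREQ_WRITE,
            .data = timeoff,
            .state = STATE_IOREQ_READY,
        };

        if ( timeoff == 0 )
            return;

        /* Broadcast on the buffered ring; non-zero means that at
         * least one ioreq server did not accept the update. */
        if ( hvm_broadcast_ioreq(&p, 1) != 0 )
            gprintk(XENLOG_ERR, "Unsuccessful timeoffset update\n");
    }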
diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index ebb3b42caa49ccdafa9ec50a8ba8c56841f1858f..47b8432f73d763723827b9234cee57b5d09b5580 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -439,6 +439,7 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler,
         .dir = IOREQ_WRITE,
         .data = data,
     };
+    struct hvm_ioreq_server *srv;
 
     if ( !s->cache )
         goto done;
@@ -479,10 +480,11 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler,
     }
 
  done:
-    if ( hvm_buffered_io_send(&p) )
-        return X86EMUL_OKAY;
+    srv = hvm_select_ioreq_server(current->domain, &p);
+    if ( !srv )
+        return X86EMUL_UNHANDLEABLE;
 
-    return X86EMUL_UNHANDLEABLE;
+    return hvm_send_ioreq(srv, &p, 1);
 }
 
 static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
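
stdvga previously relied on hvm_buffered_io_send() picking the ioreq
server internally; with that helper gone, the selection is explicit
and the X86EMUL_* result of the buffered send is propagated instead of
being collapsed to a boolean. In terms of the new API, the removed
helper was roughly equivalent to (a compat sketch, not part of the
patch):

    static int buffered_io_send_compat(ioreq_t *p)
    {
        struct hvm_ioreq_server *s =
            hvm_select_ioreq_server(current->domain, p);

        /* No server claims the range: nothing can consume the ioreq. */
        return s ? hvm_send_ioreq(s, p, 1) : X86EMUL_UNHANDLEABLE;
    }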
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 35f1300855099f2a3a8aaf8804d5904e2a0bffbc..82f1b3275dca0141845b21a1d63d6a1553e7071d 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -226,8 +226,8 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);
 
 struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
                                                  ioreq_t *p);
-int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *p);
-void hvm_broadcast_assist_req(ioreq_t *p);
+int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *p, bool_t buffered);
+unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered);
 
 void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
 int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);
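
Note the asymmetry in the new prototypes: hvm_send_ioreq() returns an
X86EMUL_* code, while hvm_broadcast_ioreq() returns a failure count,
so callers test the results differently (illustrative only):

    /* Targeted send: compare against X86EMUL_* codes. */
    if ( hvm_send_ioreq(s, &p, 0) == X86EMUL_RETRY )
        /* re-issue the request later */;

    /* Broadcast: check how many servers failed to accept it. */
    if ( hvm_broadcast_ioreq(&p, 1) != 0 )
        gprintk(XENLOG_ERR, "partial failure\n");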
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index 577b21a8217497b4bbc913d3a67fd7c500879f24..cf466890a0e5bb5b16e1ec4cce886c38b9ee3427 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -117,7 +117,6 @@ void relocate_portio_handler(
     struct domain *d, unsigned int old_port, unsigned int new_port,
     unsigned int size);
 
-int hvm_buffered_io_send(ioreq_t *p);
 void send_timeoffset_req(unsigned long timeoff);
 void send_invalidate_req(void);
 int handle_mmio(void);