pvh: tolerate HVM guests having no ioreq page
author    George Dunlap <george.dunlap@eu.citrix.com>
Wed, 13 Nov 2013 08:29:02 +0000 (09:29 +0100)
committer Jan Beulich <jbeulich@suse.com>
Wed, 13 Nov 2013 08:29:02 +0000 (09:29 +0100)
PVH guests don't have a backing device model emulator (qemu); just
tolerate this situation explicitly, rather than special-casing PVH.

For unhandled IO, hvmemul_do_io() will now return X86EMUL_OKAY, which
is, I believe, what the effect would be if qemu had no handler for the
IO.

This also fixes a potential host DoS introduced by the reworked
series: a guest hypercall that sends an invalidate request would have
crashed the host.
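
For illustration only, a minimal, self-contained sketch of the pattern
the patch applies: get_ioreq() may now return NULL, the caller falls
back to an on-stack ioreq_t, and unhandled IO completes as OKAY when
there is no DM.  The types and helper names below (ioreq_t here,
get_ioreq_sketch(), do_io_sketch(), shared_ioreq_page) are simplified
stand-ins, not the real Xen interfaces.

    #include <stddef.h>
    #include <stdio.h>

    #define STATE_IOREQ_NONE 0
    #define X86EMUL_OKAY     0
    #define X86EMUL_RETRY    1

    typedef struct { int state; } ioreq_t;

    /* Stands in for the shared ioreq page; NULL when no DM exists. */
    static ioreq_t *shared_ioreq_page;

    /* Like the patched get_ioreq(): return NULL rather than asserting. */
    static ioreq_t *get_ioreq_sketch(void)
    {
        return shared_ioreq_page;
    }

    /* Like the tail of the patched hvmemul_do_io(). */
    static int do_io_sketch(void)
    {
        ioreq_t _ioreq;
        ioreq_t *p = get_ioreq_sketch();
        int has_dm = 1;

        if ( p == NULL )
        {
            /* No backing DM: use an on-stack request instead of crashing. */
            has_dm = 0;
            p = &_ioreq;
            p->state = STATE_IOREQ_NONE;
        }

        /* ... emulation runs; suppose the access is unhandleable ... */
        return has_dm ? X86EMUL_RETRY   /* hand it to the DM as before   */
                      : X86EMUL_OKAY;   /* no DM: silently ignore the IO */
    }

    int main(void)
    {
        shared_ioreq_page = NULL;       /* a PVH-style guest: no ioreq page */
        printf("rc = %d (0 == X86EMUL_OKAY)\n", do_io_sketch());
        return 0;
    }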

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
Acked-by: Eddie Dong <eddie.dong@intel.com>
xen/arch/x86/hvm/emulate.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/io.c
xen/include/asm-x86/hvm/io.h
xen/include/asm-x86/hvm/support.h

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index f39c17358e1fa9fe147aa6fb06044176f00595c3..868aa1df516d0304fb5651d53c75f3a1f8dbdad8 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -58,10 +58,23 @@ static int hvmemul_do_io(
     struct vcpu *curr = current;
     struct hvm_vcpu_io *vio;
     ioreq_t *p = get_ioreq(curr);
+    ioreq_t _ioreq;
     unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
     p2m_type_t p2mt;
     struct page_info *ram_page;
     int rc;
+    bool_t has_dm = 1;
+
+    /*
+     * Domains without a backing DM don't have an ioreq page.  Just
+     * point to a struct on the stack, initialising the state as needed.
+     */
+    if ( !p )
+    {
+        has_dm = 0;
+        p = &_ioreq;
+        p->state = STATE_IOREQ_NONE;
+    }
 
     /* Check for paged out page */
     ram_page = get_page_from_gfn(curr->domain, ram_gfn, &p2mt, P2M_UNSHARE);
@@ -211,7 +224,7 @@ static int hvmemul_do_io(
         p->state = STATE_IORESP_READY;
         if ( !vio->mmio_retry )
         {
-            hvm_io_assist();
+            hvm_io_assist(p);
             vio->io_state = HVMIO_none;
         }
         else
@@ -219,11 +232,20 @@ static int hvmemul_do_io(
             vio->io_state = HVMIO_handle_mmio_awaiting_completion;
         break;
     case X86EMUL_UNHANDLEABLE:
-        rc = X86EMUL_RETRY;
-        if ( !hvm_send_assist_req(curr) )
-            vio->io_state = HVMIO_none;
-        else if ( p_data == NULL )
+        /* If there is no backing DM, just ignore accesses */
+        if ( !has_dm )
+        {
             rc = X86EMUL_OKAY;
+            vio->io_state = HVMIO_none;
+        }
+        else
+        {
+            rc = X86EMUL_RETRY;
+            if ( !hvm_send_assist_req(curr) )
+                vio->io_state = HVMIO_none;
+            else if ( p_data == NULL )
+                rc = X86EMUL_OKAY;
+        }
         break;
     default:
         BUG();
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e7862bc9eb145f13ee4cd1ed1002e068f21644cc..f235a246aa207b20a2ff8abfe57b2c4cfef1aa7f 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -347,13 +347,15 @@ void hvm_do_resume(struct vcpu *v)
     check_wakeup_from_wait();
 
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
-    p = get_ioreq(v);
+    if ( !(p = get_ioreq(v)) )
+        goto check_inject_trap;
+
     while ( p->state != STATE_IOREQ_NONE )
     {
         switch ( p->state )
         {
         case STATE_IORESP_READY: /* IORESP_READY -> NONE */
-            hvm_io_assist();
+            hvm_io_assist(p);
             break;
         case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
         case STATE_IOREQ_INPROCESS:
@@ -368,6 +370,7 @@ void hvm_do_resume(struct vcpu *v)
         }
     }
 
+ check_inject_trap:
     /* Inject pending hw/sw trap */
     if ( v->arch.hvm_vcpu.inject_trap.vector != -1 ) 
     {
@@ -1227,7 +1230,9 @@ bool_t hvm_send_assist_req(struct vcpu *v)
     if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
         return 0; /* implicitly bins the i/o operation */
 
-    p = get_ioreq(v);
+    if ( !(p = get_ioreq(v)) )
+        return 0;
+
     if ( unlikely(p->state != STATE_IOREQ_NONE) )
     {
         /* This indicates a bug in the device model. Crash the domain. */
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index feb0406c2e4235be051eb2747f2040db21ef57b4..deb7b92fdca3ca594f80316e6b448c6f71fdbde2 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -152,6 +152,9 @@ void send_invalidate_req(void)
     struct vcpu *v = current;
     ioreq_t *p = get_ioreq(v);
 
+    if ( !p )
+        return;
+
     if ( p->state != STATE_IOREQ_NONE )
     {
         gdprintk(XENLOG_ERR, "WARNING: send invalidate req with something "
@@ -262,11 +265,10 @@ int handle_pio(uint16_t port, unsigned int size, int dir)
     return 1;
 }
 
-void hvm_io_assist(void)
+void hvm_io_assist(ioreq_t *p)
 {
     struct vcpu *curr = current;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
-    ioreq_t *p = get_ioreq(curr);
     enum hvm_io_state io_state;
 
     rmb(); /* see IORESP_READY /then/ read contents of ioreq */
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index b0718b896c9073c75c7a8ec1c089da6121c3400a..6f4cb9640819608be1363056c328ed307e283b8d 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -121,7 +121,7 @@ int handle_mmio(void);
 int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn);
 int handle_pio(uint16_t port, unsigned int size, int dir);
 void hvm_interrupt_post(struct vcpu *v, int vector, int type);
-void hvm_io_assist(void);
+void hvm_io_assist(ioreq_t *p);
 void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
                   union vioapic_redir_entry *ent);
 
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 52aef1f1ce709af3570c3120a9f3e752902d5252..35294997ffdc1927531ddf8a461151f8d9442e4e 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -32,8 +32,7 @@ static inline ioreq_t *get_ioreq(struct vcpu *v)
     struct domain *d = v->domain;
     shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
     ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
-    ASSERT(d->arch.hvm_domain.ioreq.va != NULL);
-    return &p->vcpu_ioreq[v->vcpu_id];
+    return p ? &p->vcpu_ioreq[v->vcpu_id] : NULL;
 }
 
 #define HVM_DELIVER_NO_ERROR_CODE  -1