vm_event: sanitize vm_event response handling
author Tamas K Lengyel <tamas.lengyel@zentific.com>
Mon, 19 Sep 2016 09:38:08 +0000 (11:38 +0200)
committer Jan Beulich <jbeulich@suse.com>
Mon, 19 Sep 2016 09:38:08 +0000 (11:38 +0200)
Setting response flags in vm_event is only ever safe if the vCPU is paused.
To reflect this, we move all such checks inside the if block that already
verifies this is the case. For checks that are only supported on one
architecture, we relocate the bitmask operations to the arch-specific
handlers, avoiding that overhead on architectures that don't support them.
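
As a purely illustrative sketch (not part of this patch): a toolstack-side
consumer should only set action flags on a response when it knows the vCPU
was paused for the request. The helper name fill_response is hypothetical;
the types and VM_EVENT_FLAG_* constants are from the public vm_event
interface (xen/include/public/vm_event.h).

    #include <xen/vm_event.h> /* public interface, as installed by Xen */

    /* Hypothetical helper: build a response from a received request. */
    static void fill_response(const vm_event_request_t *req,
                              vm_event_response_t *rsp)
    {
        rsp->version = VM_EVENT_INTERFACE_VERSION;
        rsp->vcpu_id = req->vcpu_id;
        rsp->reason = req->reason;
        rsp->flags = 0;

        /*
         * Flags such as VM_EVENT_FLAG_TOGGLE_SINGLESTEP are only acted
         * upon by the hypervisor while the vCPU is paused, which is the
         * case iff the request carried VM_EVENT_FLAG_VCPU_PAUSED.
         */
        if ( req->flags & VM_EVENT_FLAG_VCPU_PAUSED )
            rsp->flags |= VM_EVENT_FLAG_VCPU_PAUSED |
                          VM_EVENT_FLAG_TOGGLE_SINGLESTEP;
    }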

Furthermore, we clean up the emulation checks so they more clearly represent
the decision logic for when emulation should take place. As part of this we
also set the stage to allow emulation in response to other types of events,
not just mem_access violations.
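
Again illustrative only, assuming the consumer echoes the request's
mem_access data back in the response (the violation re-check reads
rsp->u.mem_access): asking for the faulting instruction to be emulated now
amounts to

    /* Sketch: respond to a mem_access event requesting emulation. */
    rsp.reason = VM_EVENT_REASON_MEM_ACCESS;
    rsp.u.mem_access = req.u.mem_access;  /* gfn/flags for the re-check */
    rsp.flags = VM_EVENT_FLAG_VCPU_PAUSED | VM_EVENT_FLAG_EMULATE;

Emulation is then only armed if conflicting mem_access permissions are
still in place for that gfn.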

Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
xen/arch/x86/mm/p2m.c
xen/arch/x86/vm_event.c
xen/common/vm_event.c
xen/include/asm-arm/p2m.h
xen/include/asm-arm/vm_event.h
xen/include/asm-x86/p2m.h
xen/include/asm-x86/vm_event.h
xen/include/xen/mem_access.h

xen/arch/x86/mm/p2m.c
index 7d14c3bac68c7aadb4031fe9e399559b566a12e5..faffc2ac399ed4cfc4c3f119598915f1b2b86952 100644
@@ -1588,62 +1588,55 @@ void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp)
     }
 }
 
-void p2m_mem_access_emulate_check(struct vcpu *v,
+bool p2m_mem_access_emulate_check(struct vcpu *v,
                                   const vm_event_response_t *rsp)
 {
-    /* Mark vcpu for skipping one instruction upon rescheduling. */
-    if ( rsp->flags & VM_EVENT_FLAG_EMULATE )
-    {
-        xenmem_access_t access;
-        bool_t violation = 1;
-        const struct vm_event_mem_access *data = &rsp->u.mem_access;
+    xenmem_access_t access;
+    bool violation = 1;
+    const struct vm_event_mem_access *data = &rsp->u.mem_access;
 
-        if ( p2m_get_mem_access(v->domain, _gfn(data->gfn), &access) == 0 )
+    if ( p2m_get_mem_access(v->domain, _gfn(data->gfn), &access) == 0 )
+    {
+        switch ( access )
         {
-            switch ( access )
-            {
-            case XENMEM_access_n:
-            case XENMEM_access_n2rwx:
-            default:
-                violation = data->flags & MEM_ACCESS_RWX;
-                break;
+        case XENMEM_access_n:
+        case XENMEM_access_n2rwx:
+        default:
+            violation = data->flags & MEM_ACCESS_RWX;
+            break;
 
-            case XENMEM_access_r:
-                violation = data->flags & MEM_ACCESS_WX;
-                break;
+        case XENMEM_access_r:
+            violation = data->flags & MEM_ACCESS_WX;
+            break;
 
-            case XENMEM_access_w:
-                violation = data->flags & MEM_ACCESS_RX;
-                break;
+        case XENMEM_access_w:
+            violation = data->flags & MEM_ACCESS_RX;
+            break;
 
-            case XENMEM_access_x:
-                violation = data->flags & MEM_ACCESS_RW;
-                break;
+        case XENMEM_access_x:
+            violation = data->flags & MEM_ACCESS_RW;
+            break;
 
-            case XENMEM_access_rx:
-            case XENMEM_access_rx2rw:
-                violation = data->flags & MEM_ACCESS_W;
-                break;
+        case XENMEM_access_rx:
+        case XENMEM_access_rx2rw:
+            violation = data->flags & MEM_ACCESS_W;
+            break;
 
-            case XENMEM_access_wx:
-                violation = data->flags & MEM_ACCESS_R;
-                break;
+        case XENMEM_access_wx:
+            violation = data->flags & MEM_ACCESS_R;
+            break;
 
-            case XENMEM_access_rw:
-                violation = data->flags & MEM_ACCESS_X;
-                break;
+        case XENMEM_access_rw:
+            violation = data->flags & MEM_ACCESS_X;
+            break;
 
-            case XENMEM_access_rwx:
-                violation = 0;
-                break;
-            }
+        case XENMEM_access_rwx:
+            violation = 0;
+            break;
         }
-
-        v->arch.vm_event->emulate_flags = violation ? rsp->flags : 0;
-
-        if ( (rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA) )
-            v->arch.vm_event->emul_read_data = rsp->data.emul_read_data;
     }
+
+    return violation;
 }
 
 void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
xen/arch/x86/vm_event.c
index e938ca3d289d4437347e08ee55705a6f6845c25d..343b9c8936728880f4646442992ec7e8b5412254 100644
@@ -18,6 +18,7 @@
  * License along with this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <asm/p2m.h>
 #include <asm/vm_event.h>
 
 /* Implicitly serialized by the domctl lock. */
@@ -56,8 +57,12 @@ void vm_event_cleanup_domain(struct domain *d)
     d->arch.mem_access_emulate_each_rep = 0;
 }
 
-void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
+void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
+                                vm_event_response_t *rsp)
 {
+    if ( !(rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP) )
+        return;
+
     if ( !is_hvm_domain(d) )
         return;
 
@@ -186,6 +191,34 @@ void vm_event_fill_regs(vm_event_request_t *req)
     req->data.regs.x86.cs_arbytes = seg.attr.bytes;
 }
 
+void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
+{
+    if ( !(rsp->flags & VM_EVENT_FLAG_EMULATE) )
+    {
+        v->arch.vm_event->emulate_flags = 0;
+        return;
+    }
+
+    switch ( rsp->reason )
+    {
+    case VM_EVENT_REASON_MEM_ACCESS:
+        /*
+         * Emulate iff this is a response to a mem_access violation and there
+         * are still conflicting mem_access permissions in-place.
+         */
+        if ( p2m_mem_access_emulate_check(v, rsp) )
+        {
+            if ( rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA )
+                v->arch.vm_event->emul_read_data = rsp->data.emul_read_data;
+
+            v->arch.vm_event->emulate_flags = rsp->flags;
+        }
+        break;
+    default:
+        break;
+    };
+}
+
 /*
  * Local variables:
  * mode: C
xen/common/vm_event.c
index 8398af7203b7f9e88773298d90b15651455157f8..907ab40cde72c8868e59d6dc50d7d1895a2c4356 100644
@@ -398,42 +398,41 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
          * In some cases the response type needs extra handling, so here
          * we call the appropriate handlers.
          */
-        switch ( rsp.reason )
-        {
-#ifdef CONFIG_X86
-        case VM_EVENT_REASON_MOV_TO_MSR:
-#endif
-        case VM_EVENT_REASON_WRITE_CTRLREG:
-            vm_event_register_write_resume(v, &rsp);
-            break;
-
-#ifdef CONFIG_HAS_MEM_ACCESS
-        case VM_EVENT_REASON_MEM_ACCESS:
-            mem_access_resume(v, &rsp);
-            break;
-#endif
 
+        /* Check flags which apply only when the vCPU is paused */
+        if ( atomic_read(&v->vm_event_pause_count) )
+        {
 #ifdef CONFIG_HAS_MEM_PAGING
-        case VM_EVENT_REASON_MEM_PAGING:
-            p2m_mem_paging_resume(d, &rsp);
-            break;
+            if ( rsp.reason == VM_EVENT_REASON_MEM_PAGING )
+                p2m_mem_paging_resume(d, &rsp);
 #endif
 
-        };
+            /*
+             * Check emulation flags in the arch-specific handler only, as it
+             * has to set arch-specific flags when supported, and to avoid
+             * bitmask overhead when it isn't supported.
+             */
+            vm_event_emulate_check(v, &rsp);
+
+            /*
+             * Check in arch-specific handler to avoid bitmask overhead when
+             * not supported.
+             */
+            vm_event_register_write_resume(v, &rsp);
 
-        /* Check for altp2m switch */
-        if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
-            p2m_altp2m_check(v, rsp.altp2m_idx);
+            /*
+             * Check in arch-specific handler to avoid bitmask overhead when
+             * not supported.
+             */
+            vm_event_toggle_singlestep(d, v, &rsp);
+
+            /* Check for altp2m switch */
+            if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
+                p2m_altp2m_check(v, rsp.altp2m_idx);
 
-        /* Check flags which apply only when the vCPU is paused */
-        if ( atomic_read(&v->vm_event_pause_count) )
-        {
             if ( rsp.flags & VM_EVENT_FLAG_SET_REGISTERS )
                 vm_event_set_registers(v, &rsp);
 
-            if ( rsp.flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP )
-                vm_event_toggle_singlestep(d, v);
-
             if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
                 vm_event_vcpu_unpause(v);
         }
xen/include/asm-arm/p2m.h
index 53c4d78f35319e615a8f99257f92e163246dde66..6251b37ff2a9a81e6866f7edb4d42696b2585e56 100644
@@ -121,10 +121,11 @@ typedef enum {
                              p2m_to_mask(p2m_map_foreign)))
 
 static inline
-void p2m_mem_access_emulate_check(struct vcpu *v,
+bool p2m_mem_access_emulate_check(struct vcpu *v,
                                   const vm_event_response_t *rsp)
 {
     /* Not supported on ARM. */
+    return 0;
 }
 
 static inline
xen/include/asm-arm/vm_event.h
index 948263626fa48ff42d12b16a4062dad183aaec94..66f2474fe135f7beef8e35023d74e8320025ea41 100644
@@ -34,7 +34,8 @@ static inline void vm_event_cleanup_domain(struct domain *d)
     memset(&d->monitor, 0, sizeof(d->monitor));
 }
 
-static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
+static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
+                                              vm_event_response_t *rsp)
 {
     /* Not supported on ARM. */
 }
@@ -45,4 +46,10 @@ void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
     /* Not supported on ARM. */
 }
 
+static inline
+void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
+{
+    /* Not supported on ARM. */
+}
+
 #endif /* __ASM_ARM_VM_EVENT_H__ */
xen/include/asm-x86/p2m.h
index 9fc9eadbaa8a4499907d1d82299dfd4f77bab065..7035860903fb3607d4d3b25061935cb20f53577a 100644
@@ -677,7 +677,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
 
 /* Check for emulation and mark vcpu for skipping one instruction
  * upon rescheduling if required. */
-void p2m_mem_access_emulate_check(struct vcpu *v,
+bool p2m_mem_access_emulate_check(struct vcpu *v,
                                   const vm_event_response_t *rsp);
 
 /* Sanity check for mem_access hardware support */
xen/include/asm-x86/vm_event.h
index 294def616025d8cc5c6b28b547de7c12187ea993..ebb5d88ad0e5640bcce1900f53fcdeb6b9d5c59b 100644
@@ -35,8 +35,11 @@ int vm_event_init_domain(struct domain *d);
 
 void vm_event_cleanup_domain(struct domain *d);
 
-void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v);
+void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
+                                vm_event_response_t *rsp);
 
 void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp);
 
+void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp);
+
 #endif /* __ASM_X86_VM_EVENT_H__ */
xen/include/xen/mem_access.h
index 3d054e0c6457e3445168e2f65bfd8ef58ae26a11..da36e072bb5601e6ba45bb1144898d0f2b6ef3a6 100644
 int mem_access_memop(unsigned long cmd,
                      XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
 
-static inline
-void mem_access_resume(struct vcpu *v, vm_event_response_t *rsp)
-{
-    p2m_mem_access_emulate_check(v, rsp);
-}
-
 #else
 
 static inline
@@ -45,12 +39,6 @@ int mem_access_memop(unsigned long cmd,
     return -ENOSYS;
 }
 
-static inline
-void mem_access_resume(struct vcpu *vcpu, vm_event_response_t *rsp)
-{
-    /* Nothing to do. */
-}
-
 #endif /* HAS_MEM_ACCESS */
 
 #endif /* _XEN_ASM_MEM_ACCESS_H */