x86/hvm: serialize trap injecting producer and consumer
author Jan Beulich <jbeulich@suse.com>
Wed, 25 Jan 2017 09:51:10 +0000 (10:51 +0100)
committer Jan Beulich <jbeulich@suse.com>
Wed, 25 Jan 2017 09:51:10 +0000 (10:51 +0100)
Since injection works on a remote vCPU, and since there's no
enforcement of the subject vCPU being paused, there's a potential race
between the producing and consuming sides. Fix this by leveraging the
vector field as a synchronization variable.
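
A self-contained sketch of the handshake this patch implements, for
illustration only: the struct, produce_event()/consume_event() and the
GCC __atomic builtins are stand-ins for Xen's inject_event structure,
cmpxchg(), smp_wmb() and smp_rmb(), and the payload is trimmed to two
fields.

    #include <stdbool.h>
    #include <stdint.h>

    #define EVENT_VECTOR_UNSET    (-1)
    #define EVENT_VECTOR_UPDATING (-2)

    struct pending_event {
        int vector;              /* doubles as the synchronization variable */
        uint32_t error_code;     /* payload (trimmed for the sketch) */
        unsigned long cr2;
    };

    /* Producer (remote vCPU): returns false where the real code returns -EBUSY. */
    static bool produce_event(struct pending_event *ev, int vector,
                              uint32_t error_code, unsigned long cr2)
    {
        int expected = EVENT_VECTOR_UNSET;

        /* Claim the slot; fail if another injection is already in flight. */
        if ( !__atomic_compare_exchange_n(&ev->vector, &expected,
                                          EVENT_VECTOR_UPDATING, false,
                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED) )
            return false;

        ev->error_code = error_code;
        ev->cr2 = cr2;

        /* Publish the payload before the vector (smp_wmb() in the patch). */
        __atomic_store_n(&ev->vector, vector, __ATOMIC_RELEASE);
        return true;
    }

    /* Consumer (the subject vCPU, on resume): returns true if an event was taken. */
    static bool consume_event(struct pending_event *ev)
    {
        /*
         * Only a non-negative vector means the payload is complete;
         * __ATOMIC_ACQUIRE plays the role of smp_rmb() in the patch.
         */
        if ( __atomic_load_n(&ev->vector, __ATOMIC_ACQUIRE) < 0 )
            return false;

        /* ... deliver ev->vector / ev->error_code / ev->cr2 here ... */

        __atomic_store_n(&ev->vector, EVENT_VECTOR_UNSET, __ATOMIC_RELEASE);
        return true;
    }

The negative sentinels keep real vectors (0-255) unambiguous, so the
consumer's ">= 0" test doubles as the "payload fully published" check,
while UPDATING keeps a second producer out while the payload is still
being written.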

Signed-off-by: Jan Beulich <jbeulich@suse.com>
[re-based]
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
xen/arch/x86/hvm/dm.c
xen/arch/x86/hvm/hvm.c
xen/include/asm-x86/hvm/hvm.h

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 0c013d6ce5c1cbd05de41ffd28c5e2866adbe91d..6a722a5dc47608798ba650de6a8b6b6ec81a0322 100644
@@ -255,13 +255,16 @@ static int inject_event(struct domain *d,
     if ( data->vcpuid >= d->max_vcpus || !(v = d->vcpu[data->vcpuid]) )
         return -EINVAL;
 
-    if ( v->arch.hvm_vcpu.inject_event.vector != -1 )
+    if ( cmpxchg(&v->arch.hvm_vcpu.inject_event.vector,
+                 HVM_EVENT_VECTOR_UNSET, HVM_EVENT_VECTOR_UPDATING) !=
+         HVM_EVENT_VECTOR_UNSET )
         return -EBUSY;
 
     v->arch.hvm_vcpu.inject_event.type = data->type;
     v->arch.hvm_vcpu.inject_event.insn_len = data->insn_len;
     v->arch.hvm_vcpu.inject_event.error_code = data->error_code;
     v->arch.hvm_vcpu.inject_event.cr2 = data->cr2;
+    smp_wmb();
     v->arch.hvm_vcpu.inject_event.vector = data->vector;
 
     return 0;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 342df74aed5977dccb29c83da5b4f8ba98d8ae9c..9ffc21bb44c194f1c569f924914cc8352b1077e1 100644
@@ -542,13 +542,15 @@ void hvm_do_resume(struct vcpu *v)
         }
     }
 
-    /* Inject pending hw/sw trap */
-    if ( v->arch.hvm_vcpu.inject_event.vector != -1 )
+    /* Inject pending hw/sw event */
+    if ( v->arch.hvm_vcpu.inject_event.vector >= 0 )
     {
+        smp_rmb();
+
         if ( !hvm_event_pending(v) )
             hvm_inject_event(&v->arch.hvm_vcpu.inject_event);
 
-        v->arch.hvm_vcpu.inject_event.vector = -1;
+        v->arch.hvm_vcpu.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
     }
 
     if ( unlikely(v->arch.vm_event) && v->arch.monitor.next_interrupt_enabled )
@@ -1519,7 +1521,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
         (void(*)(unsigned long))hvm_assert_evtchn_irq,
         (unsigned long)v);
 
-    v->arch.hvm_vcpu.inject_event.vector = -1;
+    v->arch.hvm_vcpu.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
 
     if ( is_pvh_domain(d) )
     {
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 329831cf93c0e1bbd86281c15cc5ce5f088103cf..87b203a6d4092bf185a7b8eef195a068fda5708a 100644
@@ -77,6 +77,9 @@ enum hvm_intblk {
 #define HVM_HAP_SUPERPAGE_2MB   0x00000001
 #define HVM_HAP_SUPERPAGE_1GB   0x00000002
 
+#define HVM_EVENT_VECTOR_UNSET    (-1)
+#define HVM_EVENT_VECTOR_UPDATING (-2)
+
 /*
  * The hardware virtual machine (HVM) interface abstracts away from the
  * x86/x86_64 CPU virtualization assist specifics. Currently this interface