evtchn: convert vIRQ lock to an r/w one
authorJan Beulich <jbeulich@suse.com>
Fri, 11 Dec 2020 10:52:35 +0000 (11:52 +0100)
committerJan Beulich <jbeulich@suse.com>
Fri, 11 Dec 2020 10:52:35 +0000 (11:52 +0100)
There's no need to serialize all sending of vIRQ-s; all that's needed
is serialization against the closing of the respective event channels
(so far by means of a barrier). To facilitate the conversion, switch to
an ordinary write-locked region in evtchn_close().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Julien Grall <jgrall@amazon.com>
xen/common/domain.c
xen/common/event_channel.c
xen/include/xen/sched.h

index f748806a450ba8fceb426f317f1e1822f58e04c7..5ec48c3e19a371edd119ba6626970b5e5f6315c1 100644 (file)
@@ -160,7 +160,7 @@ struct vcpu *vcpu_create(struct domain *d, unsigned int vcpu_id)
     v->vcpu_id = vcpu_id;
     v->dirty_cpu = VCPU_CPU_CLEAN;
 
-    spin_lock_init(&v->virq_lock);
+    rwlock_init(&v->virq_lock);
 
     tasklet_init(&v->continue_hypercall_tasklet, NULL, NULL);
 
index 59f95f2eb235ae83e14d7c85d13c7918ffdd503e..4a4809435691ada868974ed26f6373c9f82dc0de 100644 (file)
@@ -470,6 +470,13 @@ int evtchn_bind_virq(evtchn_bind_virq_t *bind, evtchn_port_t port)
     evtchn_write_unlock(chn);
 
     bind->port = port;
+    /*
+     * If anything, the update of virq_to_evtchn[] would need guarding by
+     * virq_lock; but since this is the last action here, there's no strict
+     * need to acquire the lock. Hence holding event_lock isn't helpful
+     * anymore at this point either; instead, utilize that its unlocking
+     * acts as the otherwise necessary smp_wmb() here.
+     */
     write_atomic(&v->virq_to_evtchn[virq], port);
 
  out:
@@ -656,10 +663,12 @@ int evtchn_close(struct domain *d1, int port1, bool guest)
     case ECS_VIRQ:
         for_each_vcpu ( d1, v )
         {
-            if ( read_atomic(&v->virq_to_evtchn[chn1->u.virq]) != port1 )
-                continue;
-            write_atomic(&v->virq_to_evtchn[chn1->u.virq], 0);
-            spin_barrier(&v->virq_lock);
+            unsigned long flags;
+
+            write_lock_irqsave(&v->virq_lock, flags);
+            if ( read_atomic(&v->virq_to_evtchn[chn1->u.virq]) == port1 )
+                write_atomic(&v->virq_to_evtchn[chn1->u.virq], 0);
+            write_unlock_irqrestore(&v->virq_lock, flags);
         }
         break;
 
@@ -809,7 +818,7 @@ void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq)
 
     ASSERT(!virq_is_global(virq));
 
-    spin_lock_irqsave(&v->virq_lock, flags);
+    read_lock_irqsave(&v->virq_lock, flags);
 
     port = read_atomic(&v->virq_to_evtchn[virq]);
     if ( unlikely(port == 0) )
@@ -824,7 +833,7 @@ void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq)
     }
 
  out:
-    spin_unlock_irqrestore(&v->virq_lock, flags);
+    read_unlock_irqrestore(&v->virq_lock, flags);
 }
 
 void send_guest_global_virq(struct domain *d, uint32_t virq)
@@ -843,7 +852,7 @@ void send_guest_global_virq(struct domain *d, uint32_t virq)
     if ( unlikely(v == NULL) )
         return;
 
-    spin_lock_irqsave(&v->virq_lock, flags);
+    read_lock_irqsave(&v->virq_lock, flags);
 
     port = read_atomic(&v->virq_to_evtchn[virq]);
     if ( unlikely(port == 0) )
@@ -857,7 +866,7 @@ void send_guest_global_virq(struct domain *d, uint32_t virq)
     }
 
  out:
-    spin_unlock_irqrestore(&v->virq_lock, flags);
+    read_unlock_irqrestore(&v->virq_lock, flags);
 }
 
 void send_guest_pirq(struct domain *d, const struct pirq *pirq)
index 31abbe7a99e37378da830ca0ca158255c0c6c060..faf5fda36f9aae67533849bf7647ad52f34875b8 100644 (file)
@@ -239,7 +239,7 @@ struct vcpu
 
     /* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
     evtchn_port_t    virq_to_evtchn[NR_VIRQS];
-    spinlock_t       virq_lock;
+    rwlock_t         virq_lock;
 
     /* Tasklet for continue_hypercall_on_cpu(). */
     struct tasklet   continue_hypercall_tasklet;