spinlock: Introduce spin_lock_cb()
author    Boris Ostrovsky <boris.ostrovsky@oracle.com>
          Wed, 16 Aug 2017 18:31:00 +0000 (20:31 +0200)
committer Jan Beulich <jbeulich@suse.com>
          Tue, 22 Aug 2017 08:40:09 +0000 (10:40 +0200)
While waiting for a lock we may want to periodically run some
code. This code may, for example, allow the caller to release
resources it holds that are no longer needed in the critical
section protected by the lock.

Specifically, this feature will be needed by the scrubbing code:
the scrubber, while waiting for the heap lock so that it can merge
back clean pages, may be asked by the page allocator (which
currently holds the lock) to abort the merge and release the buddy
page head that the allocator wants.

We could use spin_trylock(), but since it doesn't take a lock
ticket it may take a long time for the lock to be acquired.
Instead we add spin_lock_cb(), which allows us to grab a ticket
and execute a callback while waiting. This callback is executed on
every iteration of the spinlock waiting loop.
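
For illustration, a waiter might use the new interface as sketched
below; the wait_state structure, the heap_lock name and the
abort_merge() helper are hypothetical stand-ins for the scrubber's
real state, not part of this patch:

    struct wait_state {
        bool drop;     /* set by the lock holder to request an abort */
    };

    static void check_drop(void *data)
    {
        struct wait_state *st = data;

        /* The lock holder asked us to back off: release the
         * resource it is waiting for (hypothetical helper). */
        if ( st->drop )
            abort_merge(st);
    }

    void merge_pages(void)
    {
        struct wait_state st = { .drop = false };

        /* Takes a ticket right away; check_drop() then runs on
         * every iteration of the wait loop until the lock is won. */
        spin_lock_cb(&heap_lock, check_drop, &st);
        /* ... critical section ... */
        spin_unlock(&heap_lock);
    }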

Since we may be sleeping while waiting for the lock, we need a
mechanism that makes sure the callback gets a chance to run. We add
spin_lock_kick(), which wakes up the waiter.
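
The holder side of the same sketch (again with hypothetical names;
how the holder finds the waiter's wait_state is not shown):

    /* Request the abort, then kick the waiter so that check_drop()
     * gets a chance to observe the request even if the waiter is
     * sleeping in arch_lock_relax(). spin_lock_kick() orders the
     * store before the wake-up (see arch_lock_signal_wmb() below). */
    st->drop = true;
    spin_lock_kick(&heap_lock);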

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Acked-by: Julien Grall <julien.grall@arm.com>
xen/common/spinlock.c
xen/include/asm-arm/spinlock.h
xen/include/asm-x86/spinlock.h
xen/include/xen/spinlock.h

diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 2a06406e500f55bc09e30ea994ce499ed0f41df4..3c1caae08df8baea3b4c3d5a31a53bb7f71f1385 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -129,7 +129,7 @@ static always_inline u16 observe_head(spinlock_tickets_t *t)
     return read_atomic(&t->head);
 }
 
-void _spin_lock(spinlock_t *lock)
+void inline _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data)
 {
     spinlock_tickets_t tickets = SPINLOCK_TICKET_INC;
     LOCK_PROFILE_VAR;
@@ -140,6 +140,8 @@ void _spin_lock(spinlock_t *lock)
     while ( tickets.tail != observe_head(&lock->tickets) )
     {
         LOCK_PROFILE_BLOCK;
+        if ( unlikely(cb) )
+            cb(data);
         arch_lock_relax();
     }
     LOCK_PROFILE_GOT;
@@ -147,6 +149,11 @@ void _spin_lock(spinlock_t *lock)
     arch_lock_acquire_barrier();
 }
 
+void _spin_lock(spinlock_t *lock)
+{
+    _spin_lock_cb(lock, NULL, NULL);
+}
+
 void _spin_lock_irq(spinlock_t *lock)
 {
     ASSERT(local_irq_is_enabled());
diff --git a/xen/include/asm-arm/spinlock.h b/xen/include/asm-arm/spinlock.h
index 8cdf9e18ced0123c63a1ad0b627aee16025c1815..42b0f584fe4e69eb1374b5326bedd4985251f30d 100644
--- a/xen/include/asm-arm/spinlock.h
+++ b/xen/include/asm-arm/spinlock.h
@@ -10,4 +10,6 @@
     sev();                      \
 } while(0)
 
+#define arch_lock_signal_wmb()  arch_lock_signal()
+
 #endif /* __ASM_SPINLOCK_H */
diff --git a/xen/include/asm-x86/spinlock.h b/xen/include/asm-x86/spinlock.h
index be72c0ff8041e555c548a9b290d7cbdca6e9ab8e..56f60957522a627a90e08eab747f5c9a4293f793 100644
--- a/xen/include/asm-x86/spinlock.h
+++ b/xen/include/asm-x86/spinlock.h
 
 #define arch_lock_relax() cpu_relax()
 #define arch_lock_signal()
+#define arch_lock_signal_wmb()      \
+({                                  \
+    smp_wmb();                      \
+    arch_lock_signal();             \
+})
 
 #endif /* __ASM_SPINLOCK_H */
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index c1883bd02ce5d4f4ebfac296379ac7bbaa99ea11..b5ca07d41afe214dc3cae5bf416f4b1071e70734 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -153,6 +153,7 @@ typedef struct spinlock {
 #define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
 
 void _spin_lock(spinlock_t *lock);
+void _spin_lock_cb(spinlock_t *lock, void (*cond)(void *), void *data);
 void _spin_lock_irq(spinlock_t *lock);
 unsigned long _spin_lock_irqsave(spinlock_t *lock);
 
@@ -169,6 +170,7 @@ void _spin_lock_recursive(spinlock_t *lock);
 void _spin_unlock_recursive(spinlock_t *lock);
 
 #define spin_lock(l)                  _spin_lock(l)
+#define spin_lock_cb(l, c, d)         _spin_lock_cb(l, c, d)
 #define spin_lock_irq(l)              _spin_lock_irq(l)
 #define spin_lock_irqsave(l, f)                                 \
     ({                                                          \
@@ -190,6 +192,8 @@ void _spin_unlock_recursive(spinlock_t *lock);
     1 : ({ local_irq_restore(flags); 0; });     \
 })
 
+#define spin_lock_kick(l)             arch_lock_signal_wmb()
+
 /* Ensure a lock is quiescent between two critical operations. */
 #define spin_barrier(l)               _spin_barrier(l)