return read_atomic(&t->head);
}
-void _spin_lock(spinlock_t *lock)
+void inline _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data)
{
spinlock_tickets_t tickets = SPINLOCK_TICKET_INC;
LOCK_PROFILE_VAR;
while ( tickets.tail != observe_head(&lock->tickets) )
{
LOCK_PROFILE_BLOCK;
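+        /* The callback runs while still waiting, i.e. without the lock held. */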
+ if ( unlikely(cb) )
+ cb(data);
arch_lock_relax();
}
LOCK_PROFILE_GOT;
arch_lock_acquire_barrier();
}
+void _spin_lock(spinlock_t *lock)
+{
+ _spin_lock_cb(lock, NULL, NULL);
+}
+
void _spin_lock_irq(spinlock_t *lock)
{
ASSERT(local_irq_is_enabled());
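For illustration, a minimal caller-side sketch of the new interface (not part of the patch; the structure, field and function names below are made up). The key contract: the callback is invoked on every contended iteration of the wait loop, before the lock is acquired, so it must only touch state that is safe to access without holding the lock.

    struct deferred_work {
        unsigned int pending;               /* hypothetical private counter */
    };

    static void drain_one(void *data)
    {
        struct deferred_work *w = data;

        /* Called while still waiting for the lock: touch private state only. */
        if ( w->pending )
            w->pending--;
    }

    static void example_acquire(spinlock_t *lock, struct deferred_work *w)
    {
        spin_lock_cb(lock, drain_one, w);   /* wrapper added below in the header */
        /* ... critical section, lock held ... */
        spin_unlock(lock);
    }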
#define arch_lock_relax() cpu_relax()
#define arch_lock_signal()
+#define arch_lock_signal_wmb() \
+({ \
+ smp_wmb(); \
+ arch_lock_signal(); \
+})
#endif /* __ASM_SPINLOCK_H */
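The new arch_lock_signal_wmb() bundles a write barrier with the architecture's wake-up hook. A hedged writer-side sketch (the flag below is made up): the smp_wmb() orders the store before the signal, so a CPU spinning in the lock's wait loop can observe the update from its callback; on this header, where arch_lock_signal() is empty, the macro reduces to smp_wmb().

    static bool work_available;        /* hypothetical flag polled by a waiter's callback */

    static void announce_work(void)
    {
        work_available = true;         /* store made visible before the wake-up ... */
        arch_lock_signal_wmb();        /* ... by the smp_wmb() inside the macro */
    }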
#define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
void _spin_lock(spinlock_t *lock);
+void _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data);
void _spin_lock_irq(spinlock_t *lock);
unsigned long _spin_lock_irqsave(spinlock_t *lock);
void _spin_unlock_recursive(spinlock_t *lock);
#define spin_lock(l) _spin_lock(l)
+#define spin_lock_cb(l, c, d) _spin_lock_cb(l, c, d)
#define spin_lock_irq(l) _spin_lock_irq(l)
#define spin_lock_irqsave(l, f)                                 \
    ({                                                          \
        ((f) = _spin_lock_irqsave(l));                          \
    })
+#define spin_lock_kick(l) arch_lock_signal_wmb()
+
/* Ensure a lock is quiescent between two critical operations. */
#define spin_barrier(l) _spin_barrier(l)
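Putting the pieces together, a hedged end-to-end sketch of the waiter/kicker pairing (lock, flag, counter and function names are illustrative only): one CPU waits for the lock and polls a flag from its callback, while another CPU publishes the flag and wakes the waiter via spin_lock_kick().

    static DEFINE_SPINLOCK(example_lock);    /* illustrative lock */
    static bool update_ready;                /* illustrative shared flag */
    static unsigned long observed;           /* illustrative counter */

    static void note_update(void *data)
    {
        unsigned long *count = data;

        /* Runs on each contended iteration; the lock is NOT held here. */
        if ( read_atomic(&update_ready) )
            (*count)++;
    }

    static void waiter(void)
    {
        spin_lock_cb(&example_lock, note_update, &observed);
        /* ... critical section, lock held ... */
        spin_unlock(&example_lock);
    }

    static void kicker(void)
    {
        write_atomic(&update_ready, true);
        spin_lock_kick(&example_lock);       /* smp_wmb() + arch_lock_signal() */
    }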