 static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);

-static void check_lock(union lock_debug *debug, bool try)
+void check_lock(union lock_debug *debug, bool try)
 {
     bool irq_safe = !local_irq_is_enabled();

 #else /* CONFIG_DEBUG_LOCKS */

-#define check_lock(l, t) ((void)0)
 #define check_barrier(l) ((void)0)
 #define got_lock(l) ((void)0)
 #define rel_lock(l) ((void)0)

     u32 cnts;

     preempt_disable();
+    check_lock(&lock->lock.debug, true);
     cnts = atomic_read(&lock->cnts);
     if ( likely(_can_read_lock(cnts)) )
     {

      * arch_lock_acquire_barrier().
      */
     if ( likely(_can_read_lock(cnts)) )
+    {
+        /* The slow path calls check_lock() via spin_lock(). */
+        check_lock(&lock->lock.debug, false);
         return;
+    }

     /* The slowpath will decrement the reader count, if necessary. */
     queue_read_lock_slowpath(lock);
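
For illustration, a minimal reader-side sketch (hypothetical names; the DEFINE_RWLOCK()/read_trylock()/read_lock() wrappers around the inline helpers are assumed from the existing rwlock API). With CONFIG_DEBUG_LOCKS enabled, both fast paths now reach check_lock(): _read_trylock() checks up front, _read_lock() checks once _can_read_lock() succeeds, and the contended case is covered by the check inside spin_lock() on the slow path.

/* Illustrative sketch only -- demo_rwlock and demo_reader are not part of the patch. */
#include <xen/rwlock.h>

static DEFINE_RWLOCK(demo_rwlock);

static void demo_reader(void)
{
    if ( !read_trylock(&demo_rwlock) )   /* _read_trylock(): check_lock(..., true) */
        read_lock(&demo_rwlock);         /* _read_lock() fast path: check_lock(..., false) */

    /* ... read state protected by demo_rwlock ... */

    read_unlock(&demo_rwlock);
}
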
      * arch_lock_acquire_barrier().
      */
     if ( atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0 )
+    {
+        /* The slow path calls check_lock() via spin_lock(). */
+        check_lock(&lock->lock.debug, false);
         return;
+    }

     queue_write_lock_slowpath(lock);

     /*

     u32 cnts;

     preempt_disable();
+    check_lock(&lock->lock.debug, true);
     cnts = atomic_read(&lock->cnts);
     if ( unlikely(cnts) ||
          unlikely(atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) != 0) )
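
The writer side is analogous, reusing the hypothetical demo_rwlock from the reader sketch (write_trylock()/write_lock()/write_unlock() wrappers assumed from the existing API): _write_trylock() checks before attempting its cmpxchg, while _write_lock() checks on the fast path once the cmpxchg succeeds and otherwise relies on the check_lock() inside spin_lock() on the slow path.

/* Illustrative sketch only -- demo_writer is not part of the patch. */
static void demo_writer(void)
{
    if ( !write_trylock(&demo_rwlock) )  /* _write_trylock(): check_lock(..., true) */
        write_lock(&demo_rwlock);        /* fast path: check_lock(..., false) after the cmpxchg */

    /* ... modify state protected by demo_rwlock ... */

    write_unlock(&demo_rwlock);
}
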
         /* Drop the read lock because we don't need it anymore. */
         read_unlock(&percpu_rwlock->rwlock);
     }
+    else
+    {
+        /* All other paths have implicit check_lock() calls via read_lock(). */
+        check_lock(&percpu_rwlock->rwlock.lock.debug, false);
+    }
 }

 static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
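
The per-CPU variant is the case that needs the explicit call most: the uncontended reader path (the new else branch) never goes through read_lock(), so without it that path would escape checking entirely. A hedged usage sketch, with hypothetical names; DEFINE_PERCPU_RWLOCK_GLOBAL(), percpu_rwlock_resource_init() and the percpu_read_lock()/percpu_read_unlock() wrappers are assumed from the existing percpu-rwlock API:

/* Illustrative sketch only -- none of these names are part of the patch. */
#include <xen/rwlock.h>

static DEFINE_PERCPU_RWLOCK_GLOBAL(demo_percpu_owner);
static percpu_rwlock_t demo_percpu_lock;

static void demo_percpu_init(void)
{
    percpu_rwlock_resource_init(&demo_percpu_lock, demo_percpu_owner);
}

static void demo_percpu_reader(void)
{
    /* Uncontended case: no read_lock(), hence the explicit check_lock() above. */
    percpu_read_lock(demo_percpu_owner, &demo_percpu_lock);

    /* ... read state protected by demo_percpu_lock ... */

    percpu_read_unlock(demo_percpu_owner, &demo_percpu_lock);
}
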
     };
 };
 #define _LOCK_DEBUG { LOCK_DEBUG_INITVAL }
+void check_lock(union lock_debug *debug, bool try);
 void spin_debug_enable(void);
 void spin_debug_disable(void);
 #else
 union lock_debug { };
 #define _LOCK_DEBUG { }
+#define check_lock(l, t) ((void)0)
 #define spin_debug_enable() ((void)0)
 #define spin_debug_disable() ((void)0)
 #endif
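
With the declaration above and the !CONFIG_DEBUG_LOCKS stub compiling away to ((void)0), header-inline code can call check_lock() unconditionally. A sketch of the pattern the rwlock changes follow, using a hypothetical helper that is not part of the patch: any fast path that can complete without reaching spin_lock() has to run the check itself.

/* Illustrative sketch only -- demo_set_flag() is hypothetical. */
#include <xen/spinlock.h>

static inline void demo_set_flag(spinlock_t *lock, bool *flag)
{
    if ( *flag )              /* fast path: flag already set, lock never taken */
    {
        /* The slow path below calls check_lock() via spin_lock(). */
        check_lock(&lock->debug, false);
        return;
    }

    spin_lock(lock);
    *flag = true;
    spin_unlock(lock);
}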