#define __RWLOCK_H__

#include <xen/percpu.h>
+#include <xen/preempt.h>
#include <xen/smp.h>
#include <xen/spinlock.h>
static inline int _read_trylock(rwlock_t *lock)
{
    u32 cnts;

+    preempt_disable();
    cnts = atomic_read(&lock->cnts);
    if ( likely(_can_read_lock(cnts)) )
    {
        cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
        if ( likely(_can_read_lock(cnts)) )
            return 1;
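+        /* Could not get the lock: drop the reader reference again. */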
        atomic_sub(_QR_BIAS, &lock->cnts);
    }
+    preempt_enable();
    return 0;
}
static inline void _read_lock(rwlock_t *lock)
{
    u32 cnts;

+    preempt_disable();
    cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
    if ( likely(_can_read_lock(cnts)) )
        return;

static inline void _read_unlock(rwlock_t *lock)
{
    /*
     * Atomically decrement the reader count
     */
    atomic_sub(_QR_BIAS, &lock->cnts);
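+    /* Matches the preempt_disable() in _read_lock() and _read_trylock(). */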
+    preempt_enable();
}
static inline void _read_unlock_irq(rwlock_t *lock)
static inline void _write_lock(rwlock_t *lock)
{
    /* Optimize for the unfair lock case where the fair flag is 0. */
+    preempt_disable();
    if ( atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0 )
        return;

static inline int _write_trylock(rwlock_t *lock)
{
    u32 cnts;

+    preempt_disable();
    cnts = atomic_read(&lock->cnts);
-    if ( unlikely(cnts) )
+    if ( unlikely(cnts) ||
+         unlikely(atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) != 0) )
+    {
+        preempt_enable();
        return 0;
+    }
-    return likely(atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0);
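+    /* Lock acquired: preemption stays disabled until _write_unlock(). */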
+    return 1;
}
static inline void _write_unlock(rwlock_t *lock)
{
    ASSERT(_is_write_locked_by_me(atomic_read(&lock->cnts)));
    atomic_and(~(_QW_CPUMASK | _QW_WMASK), &lock->cnts);
+    preempt_enable();
}
static inline void _write_unlock_irq(rwlock_t *lock)
    }

    /* Indicate this cpu is reading. */
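+    /* The matching preempt_enable() is in the per-cpu read unlock path below. */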
+    preempt_disable();
    this_cpu_ptr(per_cpudata) = percpu_rwlock;
    smp_mb();
    /* Check if a writer is waiting. */
    }
    this_cpu_ptr(per_cpudata) = NULL;
    smp_wmb();
+    preempt_enable();
}
/* Don't inline percpu write lock as it's a complex function. */