From 3ceb653eb50aaabbe41c3b6d8bc017aa76b37b74 Mon Sep 17 00:00:00 2001
From: Tim Deegan
Date: Thu, 10 Nov 2011 11:12:35 +0000
Subject: [PATCH] x86/mm: Declare an order-enforcing construct for external
 locks used in the mm layer

Declare an order-enforcing construct for a lock used in the mm layer
that is not of type mm_lock_t. This is useful whenever the mm layer
takes locks from other subsystems, or locks not implemented as
mm_lock_t.

Signed-off-by: Andres Lagar-Cavilla
Acked-by: Tim Deegan
Committed-by: Tim Deegan
---
 xen/arch/x86/mm/mm-locks.h | 46 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+)

diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 6f9610c477..5dbe0afa60 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -70,6 +70,27 @@ static inline void _mm_lock(mm_lock_t *l, const char *func, int level, int rec)
         panic("mm lock already held by %s\n", l->locker_function);
     __set_lock_level(level);
 }
+
+static inline void _mm_enforce_order_lock_pre(int level)
+{
+    __check_lock_level(level);
+}
+
+static inline void _mm_enforce_order_lock_post(int level, int *unlock_level,
+                                               unsigned short *recurse_count)
+{
+    if ( recurse_count )
+    {
+        if ( (*recurse_count)++ == 0 )
+        {
+            *unlock_level = __get_lock_level();
+        }
+    } else {
+        *unlock_level = __get_lock_level();
+    }
+    __set_lock_level(level);
+}
+
 /* This wrapper uses the line number to express the locking order below */
 #define declare_mm_lock(name)                                               \
     static inline void mm_lock_##name(mm_lock_t *l, const char *func, int rec)\
@@ -78,6 +99,16 @@ static inline void _mm_lock(mm_lock_t *l, const char *func, int level, int rec)
 #define mm_lock(name, l) mm_lock_##name(l, __func__, 0)
 #define mm_lock_recursive(name, l) mm_lock_##name(l, __func__, 1)
 
+/* This wrapper is intended for "external" locks which do not use
+ * the mm_lock_t types. Such locks inside the mm code are also subject
+ * to ordering constraints. */
+#define declare_mm_order_constraint(name)                                   \
+    static inline void mm_enforce_order_lock_pre_##name(void)               \
+    { _mm_enforce_order_lock_pre(__LINE__); }                               \
+    static inline void mm_enforce_order_lock_post_##name(                   \
+                        int *unlock_level, unsigned short *recurse_count)   \
+    { _mm_enforce_order_lock_post(__LINE__, unlock_level, recurse_count); } \
+
 static inline void mm_unlock(mm_lock_t *l)
 {
     if ( l->lock.recurse_cnt == 1 )
@@ -88,6 +119,21 @@ static inline void mm_unlock(mm_lock_t *l)
     spin_unlock_recursive(&l->lock);
 }
 
+static inline void mm_enforce_order_unlock(int unlock_level,
+                                           unsigned short *recurse_count)
+{
+    if ( recurse_count )
+    {
+        BUG_ON(*recurse_count == 0);
+        if ( (*recurse_count)-- == 1 )
+        {
+            __set_lock_level(unlock_level);
+        }
+    } else {
+        __set_lock_level(unlock_level);
+    }
+}
+
 /************************************************************************
  *                                                                      *
  * To avoid deadlocks, these locks _MUST_ be taken in the order they're *
-- 
2.30.2
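
Usage note: the sketch below illustrates how the new construct might be wired
up around a lock owned by another subsystem. declare_mm_order_constraint(),
the generated mm_enforce_order_lock_pre_*/post_* helpers and
mm_enforce_order_unlock() come from the patch above; the lock name
"per_page_sharing", the wrapper macro names and the example_* functions are
hypothetical, not taken from this patch.

/* In mm-locks.h, at the point in the ordering where the external lock
 * belongs (the __LINE__ of this declaration fixes its level): */
declare_mm_order_constraint(per_page_sharing)
#define page_sharing_mm_pre_lock()  mm_enforce_order_lock_pre_per_page_sharing()
#define page_sharing_mm_post_lock(l, r) \
        mm_enforce_order_lock_post_per_page_sharing((l), (r))
#define page_sharing_mm_unlock(l, r) mm_enforce_order_unlock((l), (r))

/* In the caller, around a lock that is not an mm_lock_t: */
static void example_acquire(spinlock_t *ext_lock, int *unlock_level,
                            unsigned short *recurse_count)
{
    page_sharing_mm_pre_lock();       /* check ordering before blocking     */
    spin_lock(ext_lock);              /* the external (non-mm_lock_t) lock  */
    page_sharing_mm_post_lock(unlock_level, recurse_count);
}

static void example_release(spinlock_t *ext_lock, int unlock_level,
                            unsigned short *recurse_count)
{
    spin_unlock(ext_lock);
    page_sharing_mm_unlock(unlock_level, recurse_count);
}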