From: Andres Lagar-Cavilla
Date: Thu, 26 Jan 2012 12:46:26 +0000 (+0000)
Subject: x86/mm: Enforce lock ordering for sharing page locks
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=8ed0c272481880b75442e62aad75c30ecfa798ce;p=xen.git

x86/mm: Enforce lock ordering for sharing page locks

Use the ordering constructs in mm-locks.h to enforce an order for the
p2m and page locks in the sharing code. This applies to either the
global sharing lock (in audit mode) or the per-page locks.

Signed-off-by: Andres Lagar-Cavilla
Signed-off-by: Adin Scannell
Acked-by: Tim Deegan
Committed-by: Tim Deegan
---

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index a9ed1aa012..237b5509f8 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -37,6 +37,13 @@
 static shr_handle_t next_handle = 1;
 
+typedef struct pg_lock_data {
+    int mm_unlock_level;
+    unsigned short recurse_count;
+} pg_lock_data_t;
+
+DEFINE_PER_CPU(pg_lock_data_t, __pld);
+
 #if MEM_SHARING_AUDIT
 
 static mm_lock_t shr_lock;
 
@@ -85,16 +92,25 @@ static inline int mem_sharing_page_lock(struct page_info *p)
 static inline int mem_sharing_page_lock(struct page_info *pg)
 {
     int rc;
+    pg_lock_data_t *pld = &(this_cpu(__pld));
+
+    page_sharing_mm_pre_lock();
     rc = page_lock(pg);
     if ( rc )
     {
         preempt_disable();
+        page_sharing_mm_post_lock(&pld->mm_unlock_level,
+                                  &pld->recurse_count);
     }
     return rc;
 }
 
 static inline void mem_sharing_page_unlock(struct page_info *pg)
 {
+    pg_lock_data_t *pld = &(this_cpu(__pld));
+
+    page_sharing_mm_unlock(pld->mm_unlock_level,
+                           &pld->recurse_count);
     preempt_enable();
     page_unlock(pg);
 }
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index eaafbe1086..836bfecc7d 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -156,7 +156,23 @@ declare_mm_lock(shr)
 
 #else
 
-/* We use an efficient per-page lock when AUDIT is not enabled. */
+/* Sharing per page lock
+ *
+ * This is an external lock, not represented by an mm_lock_t. The memory
+ * sharing lock uses it to protect addition and removal of (gfn,domain)
+ * tuples to a shared page. We enforce order here against the p2m lock,
+ * which is taken after the page_lock to change the gfn's p2m entry.
+ *
+ * Note that in sharing audit mode, we use the global page lock above,
+ * instead.
+ *
+ * The lock is recursive because during share we lock two pages. */
+
+declare_mm_order_constraint(per_page_sharing)
+#define page_sharing_mm_pre_lock()   mm_enforce_order_lock_pre_per_page_sharing()
+#define page_sharing_mm_post_lock(l, r) \
+        mm_enforce_order_lock_post_per_page_sharing((l), (r))
+#define page_sharing_mm_unlock(l, r) mm_enforce_order_unlock((l), (r))
 
 #endif /* MEM_SHARING_AUDIT */
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index c8d5d66e4f..3c2090f035 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -351,7 +351,8 @@ void clear_superpage_mark(struct page_info *page);
  * backing. Nesting may happen when sharing (and locking) two pages -- deadlock
  * is avoided by locking pages in increasing order.
  * Memory sharing may take the p2m_lock within a page_lock/unlock
- * critical section.
+ * critical section. We enforce ordering between page_lock and p2m_lock using an
+ * mm-locks.h construct.
  *
  * These two users (pte serialization and memory sharing) do not collide, since
  * sharing is only supported for hvm guests, which do not perform pv pte updates.
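
For reference, the standalone sketch below (not part of the patch, and not
Xen's actual mm-locks.h code) models the idea behind an mm-locks.h ordering
constraint: each lock class gets a level, a per-CPU variable tracks the
innermost level currently held, taking a lock at a level at or below one
already held is flagged as an ordering violation, and the outer level is
saved at lock time so it can be restored on unlock. The helper names
(enforce_order_pre/post/unlock) and the level numbers are illustrative
assumptions only; recursion counting is omitted for brevity.

    /* Minimal, single-threaded sketch of a lock-ordering check in the
     * spirit of mm-locks.h. All names and levels are illustrative. */
    #include <assert.h>
    #include <stdio.h>

    /* Innermost lock level currently held on this "CPU" (a plain global
     * here; the real code keeps this in a per-CPU variable). */
    static int current_level = 0;

    /* Before blocking on a lock of level 'level', verify that locks are
     * only ever nested in increasing level order. */
    static void enforce_order_pre(int level)
    {
        if ( level <= current_level )
        {
            fprintf(stderr, "lock order violation: level %d inside %d\n",
                    level, current_level);
            assert(0);
        }
    }

    /* Once the lock is held, remember the outer level so it can be
     * restored when this lock is dropped. */
    static void enforce_order_post(int level, int *unlock_level)
    {
        *unlock_level = current_level;
        current_level = level;
    }

    /* On unlock, pop back to the level recorded at lock time. */
    static void enforce_order_unlock(int unlock_level)
    {
        current_level = unlock_level;
    }

    int main(void)
    {
        int page_saved, p2m_saved;

        /* Hypothetical levels: the per-page sharing lock orders before
         * the p2m lock, as the patch requires. */
        enum { PAGE_SHARING_LEVEL = 10, P2M_LEVEL = 20 };

        /* Correct nesting: page lock first, p2m lock inside it. */
        enforce_order_pre(PAGE_SHARING_LEVEL);
        enforce_order_post(PAGE_SHARING_LEVEL, &page_saved);

        enforce_order_pre(P2M_LEVEL);
        enforce_order_post(P2M_LEVEL, &p2m_saved);

        enforce_order_unlock(p2m_saved);
        enforce_order_unlock(page_saved);

        printf("correct ordering accepted\n");

        /* Taking the p2m lock first and the page lock inside it would
         * trip the assertion in enforce_order_pre(). */
        return 0;
    }

In the patch itself, page_sharing_mm_pre_lock(), page_sharing_mm_post_lock()
and page_sharing_mm_unlock() play the roles of these helpers, with the saved
level and a recursion count (needed because sharing locks two pages) kept in
the per-CPU pg_lock_data_t.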