x86/mm: dedup the various copies of the shadow lock functions
author     Tim Deegan <Tim.Deegan@citrix.com>
           Thu, 2 Jun 2011 12:16:52 +0000 (13:16 +0100)
committer  Tim Deegan <Tim.Deegan@citrix.com>
           Thu, 2 Jun 2011 12:16:52 +0000 (13:16 +0100)
Define the lock and unlock functions once, and list all the locks in one
place so (a) it's obvious what the locking discipline is and (b) none of
the locks are visible to non-mm code.  Automatically enforce that these
locks never get taken in the wrong order.

Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>
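
To see the new pattern from a caller's point of view, here is a short
sketch (illustrative only: the "my" lock, its arch.my_lock field and
example() are invented for this note; declare_mm_lock(), mm_lock(),
mm_unlock() and shr_lock() are the real primitives this patch adds):

    /* In mm-locks.h: declare each lock exactly once.  The __LINE__ of
     * the declaration becomes the lock's level in the global order. */
    declare_mm_lock(my)                        /* hypothetical lock */
    #define my_lock(d)   mm_lock(my, &(d)->arch.my_lock)
    #define my_unlock(d) mm_unlock(&(d)->arch.my_lock)

    /* In a caller: locks may only be nested in declaration order. */
    void example(struct domain *d)
    {
        shr_lock();   /* declared earliest in mm-locks.h: lowest level */
        my_lock(d);   /* declared later: higher level, so this nests   */
        my_unlock(d);
        shr_unlock();
    }
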
17 files changed:
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/mm/hap/hap.c
xen/arch/x86/mm/hap/private.h
xen/arch/x86/mm/mem_sharing.c
xen/arch/x86/mm/mm-locks.h [new file with mode: 0644]
xen/arch/x86/mm/p2m-ept.c
xen/arch/x86/mm/p2m-pod.c
xen/arch/x86/mm/p2m-pt.c
xen/arch/x86/mm/p2m.c
xen/arch/x86/mm/paging.c
xen/arch/x86/mm/shadow/common.c
xen/arch/x86/mm/shadow/multi.c
xen/arch/x86/mm/shadow/private.h
xen/include/asm-x86/domain.h
xen/include/asm-x86/hap.h
xen/include/asm-x86/mm.h
xen/include/asm-x86/p2m.h

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 42e0df28659b0fe69ce569694b4820ef78e12be9..8d817354c01415731107c35e857e7736cf484812 100644 (file)
@@ -1215,7 +1215,6 @@ void ept_sync_domain(struct domain *d)
         return;
 
     ASSERT(local_irq_is_enabled());
-    ASSERT(p2m_locked_by_me(p2m_get_hostp2m(d)));
 
     /*
      * Flush active cpus synchronously. Flush others the next time this domain
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index e80e8baf70e748085d6cb24246198c216acf603e..c773bf152f264ba8d7118b16eefa558ab2e08af3 100644 (file)
@@ -572,7 +572,7 @@ static void hap_destroy_monitor_table(struct vcpu* v, mfn_t mmfn)
 /************************************************/
 void hap_domain_init(struct domain *d)
 {
-    hap_lock_init(d);
+    mm_lock_init(&d->arch.paging.hap.lock);
     INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist);
 }
 
diff --git a/xen/arch/x86/mm/hap/private.h b/xen/arch/x86/mm/hap/private.h
index 2ffdd1410aa65f83a9bd2825cb4368ffb1d9aa24..6dcd12883816c7ea92c0a7e2258c8f569c997f66 100644 (file)
@@ -20,6 +20,8 @@
 #ifndef __HAP_PRIVATE_H__
 #define __HAP_PRIVATE_H__
 
+#include "../mm-locks.h"
+
 /********************************************/
 /*          GUEST TRANSLATION FUNCS         */
 /********************************************/
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index d575008b15a189765ca2bd41de286c1f57b46805..e6299031722cfc1d12f878f0e1dd66e916b861cc 100644 (file)
@@ -32,6 +32,8 @@
 #include <asm/mem_event.h>
 #include <asm/atomic.h>
 
+#include "mm-locks.h"
+
 /* Auditing of memory sharing code? */
 #define MEM_SHARING_AUDIT  0
 
@@ -74,13 +76,7 @@ typedef struct gfn_info
     struct list_head list;
 } gfn_info_t;
 
-typedef struct shr_lock
-{
-    spinlock_t  lock;            /* mem sharing lock */
-    int         locker;          /* processor which holds the lock */
-    const char *locker_function; /* func that took it */
-} shr_lock_t;
-static shr_lock_t shr_lock;
+static mm_lock_t shr_lock;
 
 /* Returns true if list has only one entry. O(1) complexity. */
 static inline int list_has_one_entry(struct list_head *head)
@@ -93,43 +89,11 @@ static inline struct gfn_info* gfn_get_info(struct list_head *list)
     return list_entry(list->next, struct gfn_info, list);
 }
 
-#define shr_lock_init(_i)                      \
-    do {                                       \
-        spin_lock_init(&shr_lock.lock);        \
-        shr_lock.locker = -1;                  \
-        shr_lock.locker_function = "nobody";   \
-    } while (0)
-
-#define shr_locked_by_me(_i)                   \
-    (current->processor == shr_lock.locker)
-
-#define shr_lock(_i)                                           \
-    do {                                                       \
-        if ( unlikely(shr_lock.locker == current->processor) ) \
-        {                                                      \
-            printk("Error: shr lock held by %s\n",             \
-                   shr_lock.locker_function);                  \
-            BUG();                                             \
-        }                                                      \
-        spin_lock(&shr_lock.lock);                             \
-        ASSERT(shr_lock.locker == -1);                         \
-        shr_lock.locker = current->processor;                  \
-        shr_lock.locker_function = __func__;                   \
-    } while (0)
-
-#define shr_unlock(_i)                                    \
-    do {                                                  \
-        ASSERT(shr_lock.locker == current->processor);    \
-        shr_lock.locker = -1;                             \
-        shr_lock.locker_function = "nobody";              \
-        spin_unlock(&shr_lock.lock);                      \
-    } while (0)
-
 static void __init mem_sharing_hash_init(void)
 {
     int i;
 
-    shr_lock_init();
+    mm_lock_init(&shr_lock);
     for(i=0; i<SHR_HASH_LENGTH; i++)
         shr_hash[i] = NULL;
 }
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
new file mode 100644 (file)
index 0000000..b8372ba
--- /dev/null
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -0,0 +1,161 @@
+/******************************************************************************
+ * arch/x86/mm/mm-locks.h
+ *
+ * Spinlocks used by the code in arch/x86/mm.
+ *
+ * Copyright (c) 2011 Citrix Systems, inc. 
+ * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
+ * Copyright (c) 2006-2007 XenSource Inc.
+ * Copyright (c) 2006 Michael A Fetterman
+ * 
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _MM_LOCKS_H
+#define _MM_LOCKS_H
+
+/* Per-CPU variable for enforcing the lock ordering */
+DECLARE_PER_CPU(int, mm_lock_level);
+
+static inline void mm_lock_init(mm_lock_t *l)
+{
+    spin_lock_init(&l->lock);
+    l->locker = -1;
+    l->locker_function = "nobody";
+    l->unlock_level = 0;
+}
+
+static inline void _mm_lock(mm_lock_t *l, const char *func, int level)
+{
+    if ( unlikely(l->locker == current->processor) )
+        panic("mm lock held by %s\n", l->locker_function);
+    /* If you see this crash, the numbers printed are lines in this file 
+     * where the offending locks are declared. */
+    if ( unlikely(this_cpu(mm_lock_level) >= level) )
+        panic("mm locking order violation: %i >= %i\n", 
+              this_cpu(mm_lock_level), level);
+    spin_lock(&l->lock);
+    ASSERT(l->locker == -1);
+    l->locker = current->processor;
+    l->locker_function = func;
+    l->unlock_level = this_cpu(mm_lock_level);
+    this_cpu(mm_lock_level) = level;
+}
+/* This wrapper uses the line number to express the locking order below */
+#define declare_mm_lock(name)                                             \
+  static inline void mm_lock_##name(mm_lock_t *l, const char *func)       \
+  { _mm_lock(l, func, __LINE__); }
+/* This one captures the name of the calling function */
+#define mm_lock(name, l) mm_lock_##name(l, __func__)
+
+static inline void mm_unlock(mm_lock_t *l)
+{
+    ASSERT(l->locker == current->processor);
+    l->locker = -1;
+    l->locker_function = "nobody";
+    this_cpu(mm_lock_level) = l->unlock_level;
+    l->unlock_level = -1;
+    spin_unlock(&l->lock);
+}
+
+static inline int mm_locked_by_me(mm_lock_t *l) 
+{
+    return (current->processor == l->locker);
+}
+
+/************************************************************************
+ *                                                                      *
+ * To avoid deadlocks, these locks _MUST_ be taken in the order they're *
+ * declared in this file.  The locking functions will enforce this.     *
+ *                                                                      *
+ ************************************************************************/
+
+/* Page-sharing lock (global) 
+ *
+ * A single global lock that protects the memory-sharing code's
+ * hash tables. */
+
+declare_mm_lock(shr)
+#define shr_lock()         mm_lock(shr, &shr_lock)
+#define shr_unlock()       mm_unlock(&shr_lock)
+#define shr_locked_by_me() mm_locked_by_me(&shr_lock)
+
+/* Nested P2M lock (per-domain)
+ *
+ * A per-domain lock that protects some of the nested p2m datastructures.
+ * TODO: find out exactly what needs to be covered by this lock */
+
+declare_mm_lock(nestedp2m)
+#define nestedp2m_lock(d)   mm_lock(nestedp2m, &(d)->arch.nested_p2m_lock)
+#define nestedp2m_unlock(d) mm_unlock(&(d)->arch.nested_p2m_lock)
+
+/* P2M lock (per-p2m-table)
+ * 
+ * This protects all updates to the p2m table.  Updates are expected to
+ * be safe against concurrent reads, which do *not* require the lock. */
+
+declare_mm_lock(p2m)
+#define p2m_lock(p)         mm_lock(p2m, &(p)->lock)
+#define p2m_unlock(p)       mm_unlock(&(p)->lock)
+#define p2m_locked_by_me(p) mm_locked_by_me(&(p)->lock)
+
+/* Shadow lock (per-domain)
+ *
+ * This lock is intended to allow us to make atomic updates to the
+ * software TLB that the shadow pagetables provide.
+ *
+ * Specifically, it protects:
+ *   - all changes to shadow page table pages
+ *   - the shadow hash table
+ *   - the shadow page allocator 
+ *   - all changes to guest page table pages
+ *   - all changes to the page_info->tlbflush_timestamp
+ *   - the page_info->count fields on shadow pages */
+
+declare_mm_lock(shadow)
+#define shadow_lock(d)         mm_lock(shadow, &(d)->arch.paging.shadow.lock)
+#define shadow_unlock(d)       mm_unlock(&(d)->arch.paging.shadow.lock)
+#define shadow_locked_by_me(d) mm_locked_by_me(&(d)->arch.paging.shadow.lock)
+
+/* HAP lock (per-domain)
+ * 
+ * Equivalent of the shadow lock for HAP.  Protects updates to the
+ * NPT and EPT tables, and the HAP page allocator. */
+
+declare_mm_lock(hap)
+#define hap_lock(d)         mm_lock(hap, &(d)->arch.paging.hap.lock)
+#define hap_unlock(d)       mm_unlock(&(d)->arch.paging.hap.lock)
+#define hap_locked_by_me(d) mm_locked_by_me(&(d)->arch.paging.hap.lock)
+
+/* Log-dirty lock (per-domain) 
+ * 
+ * Protects the log-dirty bitmap from concurrent accesses (and teardowns, etc).
+ *
+ * Because mark_dirty is called from a lot of places, the log-dirty lock
+ * may be acquired with the shadow or HAP locks already held.  When the
+ * log-dirty code makes callbacks into HAP or shadow code to reset
+ * various traps that will trigger the mark_dirty calls, it must *not*
+ * have the log-dirty lock held, or it risks deadlock.  Because the only
+ * purpose of those calls is to make sure that *guest* actions will
+ * cause mark_dirty to be called (hypervisor actions explicitly call it
+ * anyway), it is safe to release the log-dirty lock before the callback
+ * as long as the domain is paused for the entire operation. */
+
+declare_mm_lock(log_dirty)
+#define log_dirty_lock(d) mm_lock(log_dirty, &(d)->arch.paging.log_dirty.lock)
+#define log_dirty_unlock(d) mm_unlock(&(d)->arch.paging.log_dirty.lock)
+
+
+#endif /* _MM_LOCKS_H */
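
A worked illustration of the ordering check (not part of the patch;
"p" is any struct p2m_domain pointer, and a lock's level is the
__LINE__ of its declare_mm_lock() above, so shr < nestedp2m < p2m <
shadow < hap < log_dirty):

    shr_lock();      /* this_cpu(mm_lock_level): 0 -> level(shr); OK */
    p2m_lock(p);     /* level(p2m) > level(shr): OK                  */
    p2m_unlock(p);   /* mm_lock_level falls back to level(shr)       */
    shr_unlock();    /* mm_lock_level falls back to 0                */

    p2m_lock(p);
    shr_lock();      /* level(shr) <= mm_lock_level, so _mm_lock()
                      * panics: "mm locking order violation"         */
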
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 16a5c9a4077b173ff504b15a01cfdc623c0c2de2..bf2c25d3b865b9aced873a5c51845f31ebc180dd 100644 (file)
@@ -32,6 +32,8 @@
 #include <xen/keyhandler.h>
 #include <xen/softirq.h>
 
+#include "mm-locks.h"
+
 #define atomic_read_ept_entry(__pepte)                              \
     ( (ept_entry_t) { .epte = atomic_read64(&(__pepte)->epte) } )
 #define atomic_write_ept_entry(__pepte, __epte)                     \
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index e26d1621e9d8592333f7d20e1135dcbab3a40d01..e83dfffc9c3a5ee40feede4556e0ad5a3023a5c5 100644 (file)
@@ -32,7 +32,8 @@
 #include <xen/event.h>
 #include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
-
+
+#include "mm-locks.h"
 
 /* Override macros from asm/page.h to make them work with mfn_t */
 #undef mfn_to_page
@@ -375,7 +376,7 @@ p2m_pod_empty_cache(struct domain *d)
 
     /* After this barrier no new PoD activities can happen. */
     BUG_ON(!d->is_dying);
-    spin_barrier(&p2m->lock);
+    spin_barrier(&p2m->lock.lock);
 
     spin_lock(&d->page_alloc_lock);
 
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 9c20c97547030e72096bcd55f350d04ae3cb5ab0..c362267c6562bff2d731e5794f06b60acaa132b5 100644 (file)
@@ -38,6 +38,8 @@
 #include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
 
+#include "mm-locks.h"
+
 /* Override macros from asm/page.h to make them work with mfn_t */
 #undef mfn_to_page
 #define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 55ef2088700d814fa50609db6417d1aa793e897f..25f5381eb376cc1505c81d7d0c86a63cbbba8f04 100644 (file)
@@ -37,6 +37,8 @@
 #include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
 
+#include "mm-locks.h"
+
 /* turn on/off 1GB host page table support for hap, default on */
 static bool_t __read_mostly opt_hap_1gb = 1;
 boolean_param("hap_1gb", opt_hap_1gb);
@@ -70,7 +72,7 @@ boolean_param("hap_2mb", opt_hap_2mb);
 static void p2m_initialise(struct domain *d, struct p2m_domain *p2m)
 {
     memset(p2m, 0, sizeof(*p2m));
-    p2m_lock_init(p2m);
+    mm_lock_init(&p2m->lock);
     INIT_PAGE_LIST_HEAD(&p2m->pages);
     INIT_PAGE_LIST_HEAD(&p2m->pod.super);
     INIT_PAGE_LIST_HEAD(&p2m->pod.single);
@@ -95,7 +97,7 @@ p2m_init_nestedp2m(struct domain *d)
     uint8_t i;
     struct p2m_domain *p2m;
 
-    nestedp2m_lock_init(d);
+    mm_lock_init(&d->arch.nested_p2m_lock);
     for (i = 0; i < MAX_NESTEDP2M; i++) {
         d->arch.nested_p2m[i] = p2m = xmalloc(struct p2m_domain);
         if (p2m == NULL)
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 65b53ee7d43bfa51313e5c4b4e011fa2ae3122e7..813bcdbde0443d3a3e31bbca0f6420fa00bede2f 100644 (file)
@@ -30,6 +30,8 @@
 #include <xen/numa.h>
 #include <xsm/xsm.h>
 
+#include "mm-locks.h"
+
 /* Printouts */
 #define PAGING_PRINTK(_f, _a...)                                     \
     debugtrace_printk("pg: %s(): " _f, __func__, ##_a)
@@ -41,9 +43,9 @@
             debugtrace_printk("pgdebug: %s(): " _f, __func__, ##_a); \
     } while (0)
 
-/************************************************/
-/*              LOG DIRTY SUPPORT               */
-/************************************************/
+/* Per-CPU variable for enforcing the lock ordering */
+DEFINE_PER_CPU(int, mm_lock_level);
+
 /* Override macros from asm/page.h to make them work with mfn_t */
 #undef mfn_to_page
 #define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
@@ -50,49 +52,9 @@
 #undef page_to_mfn
 #define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
 
-/* The log-dirty lock.  This protects the log-dirty bitmap from
- * concurrent accesses (and teardowns, etc).
- *
- * Locking discipline: always acquire shadow or HAP lock before this one.
- *
- * Because mark_dirty is called from a lot of places, the log-dirty lock
- * may be acquired with the shadow or HAP locks already held.  When the
- * log-dirty code makes callbacks into HAP or shadow code to reset
- * various traps that will trigger the mark_dirty calls, it must *not*
- * have the log-dirty lock held, or it risks deadlock.  Because the only
- * purpose of those calls is to make sure that *guest* actions will
- * cause mark_dirty to be called (hypervisor actions explictly call it
- * anyway), it is safe to release the log-dirty lock before the callback
- * as long as the domain is paused for the entire operation. */
-
-#define log_dirty_lock_init(_d)                                   \
-    do {                                                          \
-        spin_lock_init(&(_d)->arch.paging.log_dirty.lock);        \
-        (_d)->arch.paging.log_dirty.locker = -1;                  \
-        (_d)->arch.paging.log_dirty.locker_function = "nobody";   \
-    } while (0)
-
-#define log_dirty_lock(_d)                                                   \
-    do {                                                                     \
-        if (unlikely((_d)->arch.paging.log_dirty.locker==current->processor))\
-        {                                                                    \
-            printk("Error: paging log dirty lock held by %s\n",              \
-                   (_d)->arch.paging.log_dirty.locker_function);             \
-            BUG();                                                           \
-        }                                                                    \
-        spin_lock(&(_d)->arch.paging.log_dirty.lock);                        \
-        ASSERT((_d)->arch.paging.log_dirty.locker == -1);                    \
-        (_d)->arch.paging.log_dirty.locker = current->processor;             \
-        (_d)->arch.paging.log_dirty.locker_function = __func__;              \
-    } while (0)
-
-#define log_dirty_unlock(_d)                                              \
-    do {                                                                  \
-        ASSERT((_d)->arch.paging.log_dirty.locker == current->processor); \
-        (_d)->arch.paging.log_dirty.locker = -1;                          \
-        (_d)->arch.paging.log_dirty.locker_function = "nobody";           \
-        spin_unlock(&(_d)->arch.paging.log_dirty.lock);                   \
-    } while (0)
+/************************************************/
+/*              LOG DIRTY SUPPORT               */
+/************************************************/
 
 static mfn_t paging_new_log_dirty_page(struct domain *d)
 {
@@ -671,7 +633,7 @@ void paging_log_dirty_init(struct domain *d,
                            void   (*clean_dirty_bitmap)(struct domain *d))
 {
     /* We initialize log dirty lock first */
-    log_dirty_lock_init(d);
+    mm_lock_init(&d->arch.paging.log_dirty.lock);
 
     d->arch.paging.log_dirty.enable_log_dirty = enable_log_dirty;
     d->arch.paging.log_dirty.disable_log_dirty = disable_log_dirty;
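
The callback rule described in mm-locks.h's log-dirty comment looks
roughly like this in practice (a condensed sketch, not code from this
patch; the enable_log_dirty callback is real and is wired up just
above, but the surrounding sequence is simplified):

    domain_pause(d);               /* freeze guest-visible activity */
    log_dirty_lock(d);
    /* ... update log-dirty state ... */
    log_dirty_unlock(d);           /* drop the lock *before* ...    */
    d->arch.paging.log_dirty.enable_log_dirty(d);
                                   /* ... the callback, which takes
                                    * the shadow/HAP lock           */
    domain_unpause(d);
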
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 849dec347cff0fd8377b7b8d41617ecac171be55..c18b90a9071d473eb99910f73c19b8c320580ed8 100644 (file)
@@ -45,7 +45,7 @@ DEFINE_PER_CPU(uint32_t,trace_shadow_path_flags);
  * Called for every domain from arch_domain_create() */
 void shadow_domain_init(struct domain *d, unsigned int domcr_flags)
 {
-    shadow_lock_init(d);
+    mm_lock_init(&d->arch.paging.shadow.lock);
     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelist);
     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
 
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 43958ffa3ed5e516066e3f63c0807570212e7f5e..f1263a4946c18a97d6d10d1e6ff7fab47339a453 100644 (file)
@@ -3136,7 +3136,7 @@ static int sh_page_fault(struct vcpu *v,
     if ( unlikely(shadow_locked_by_me(d)) )
     {
         SHADOW_ERROR("Recursive shadow fault: lock was taken by %s\n",
-                     d->arch.paging.shadow.locker_function);
+                     d->arch.paging.shadow.lock.locker_function);
         return 0;
     }
 
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index c12a05c363ba7143774860ff82eb4265ea6d2b5d..d7814192925c4d93b93fe7011ed851eb370c9e56 100644 (file)
@@ -31,6 +31,7 @@
 #include <asm/x86_emulate.h>
 #include <asm/hvm/support.h>
 
+#include "../mm-locks.h"
 
 /******************************************************************************
  * Levels of self-test and paranoia
@@ -128,57 +129,6 @@ enum {
     TRCE_SFLAG_OOS_FIXUP_EVICT,
 };
 
-/******************************************************************************
- * The shadow lock.
- *
- * This lock is per-domain.  It is intended to allow us to make atomic
- * updates to the software TLB that the shadow tables provide.
- * 
- * Specifically, it protects:
- *   - all changes to shadow page table pages
- *   - the shadow hash table
- *   - the shadow page allocator 
- *   - all changes to guest page table pages
- *   - all changes to the page_info->tlbflush_timestamp
- *   - the page_info->count fields on shadow pages
- *   - the shadow dirty bit array and count
- */
-#ifndef CONFIG_SMP
-#error shadow.h currently requires CONFIG_SMP
-#endif
-
-#define shadow_lock_init(_d)                                   \
-    do {                                                       \
-        spin_lock_init(&(_d)->arch.paging.shadow.lock);        \
-        (_d)->arch.paging.shadow.locker = -1;                  \
-        (_d)->arch.paging.shadow.locker_function = "nobody";   \
-    } while (0)
-
-#define shadow_locked_by_me(_d)                     \
-    (current->processor == (_d)->arch.paging.shadow.locker)
-
-#define shadow_lock(_d)                                                       \
-    do {                                                                      \
-        if ( unlikely((_d)->arch.paging.shadow.locker == current->processor) )\
-        {                                                                     \
-            printk("Error: shadow lock held by %s\n",                         \
-                   (_d)->arch.paging.shadow.locker_function);                 \
-            BUG();                                                            \
-        }                                                                     \
-        spin_lock(&(_d)->arch.paging.shadow.lock);                            \
-        ASSERT((_d)->arch.paging.shadow.locker == -1);                        \
-        (_d)->arch.paging.shadow.locker = current->processor;                 \
-        (_d)->arch.paging.shadow.locker_function = __func__;                  \
-    } while (0)
-
-#define shadow_unlock(_d)                                              \
-    do {                                                               \
-        ASSERT((_d)->arch.paging.shadow.locker == current->processor); \
-        (_d)->arch.paging.shadow.locker = -1;                          \
-        (_d)->arch.paging.shadow.locker_function = "nobody";           \
-        spin_unlock(&(_d)->arch.paging.shadow.lock);                   \
-    } while (0)
-
 
 /* Size (in bytes) of a guest PTE */
 #if GUEST_PAGING_LEVELS >= 3
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 0488e655bb2d66f6ba347b31c5c03cc53036b7b5..2879480b0bb66c0db03911d02b76c1d8f7c93127 100644 (file)
@@ -91,9 +91,8 @@ void hypercall_page_initialise(struct domain *d, void *);
 /*          shadow paging extension             */
 /************************************************/
 struct shadow_domain {
-    spinlock_t        lock;  /* shadow domain lock */
-    int               locker; /* processor which holds the lock */
-    const char       *locker_function; /* Func that took it */
+    mm_lock_t         lock;  /* shadow domain lock */
+
     unsigned int      opt_flags;    /* runtime tunable optimizations on/off */
     struct page_list_head pinned_shadows;
 
@@ -159,9 +158,7 @@ struct shadow_vcpu {
 /*            hardware assisted paging          */
 /************************************************/
 struct hap_domain {
-    spinlock_t        lock;
-    int               locker;
-    const char       *locker_function;
+    mm_lock_t         lock;
 
     struct page_list_head freelist;
     unsigned int      total_pages;  /* number of pages allocated */
@@ -174,9 +171,7 @@ struct hap_domain {
 /************************************************/
 struct log_dirty_domain {
     /* log-dirty lock */
-    spinlock_t     lock;
-    int            locker; /* processor that holds the lock */
-    const char    *locker_function; /* func that took it */
+    mm_lock_t     lock;
 
     /* log-dirty radix tree to record dirty pages */
     mfn_t          top;
@@ -280,9 +275,7 @@ struct arch_domain
 
     /* nestedhvm: translate l2 guest physical to host physical */
     struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
-    spinlock_t nested_p2m_lock;
-    int nested_p2m_locker;
-    const char *nested_p2m_function;
+    mm_lock_t nested_p2m_lock;
 
     /* NB. protected by d->event_lock and by irq_desc[irq].lock */
     struct radix_tree_root irq_pirq;
diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
index 4d01f1b5d9432d46c0f0405b4e615ea40d47d824..a2532a427164ca4d9ccd29aede3b763e4db69186 100644 (file)
@@ -46,41 +46,6 @@ hap_unmap_domain_page(void *p)
     unmap_domain_page(p);
 }
 
-/************************************************/
-/*           locking for hap code               */
-/************************************************/
-#define hap_lock_init(_d)                                   \
-    do {                                                    \
-        spin_lock_init(&(_d)->arch.paging.hap.lock);        \
-        (_d)->arch.paging.hap.locker = -1;                  \
-        (_d)->arch.paging.hap.locker_function = "nobody";   \
-    } while (0)
-
-#define hap_locked_by_me(_d)                     \
-    (current->processor == (_d)->arch.paging.hap.locker)
-
-#define hap_lock(_d)                                                       \
-    do {                                                                   \
-        if ( unlikely((_d)->arch.paging.hap.locker == current->processor) )\
-        {                                                                  \
-            printk("Error: hap lock held by %s\n",                         \
-                   (_d)->arch.paging.hap.locker_function);                 \
-            BUG();                                                         \
-        }                                                                  \
-        spin_lock(&(_d)->arch.paging.hap.lock);                            \
-        ASSERT((_d)->arch.paging.hap.locker == -1);                        \
-        (_d)->arch.paging.hap.locker = current->processor;                 \
-        (_d)->arch.paging.hap.locker_function = __func__;                  \
-    } while (0)
-
-#define hap_unlock(_d)                                              \
-    do {                                                            \
-        ASSERT((_d)->arch.paging.hap.locker == current->processor); \
-        (_d)->arch.paging.hap.locker = -1;                          \
-        (_d)->arch.paging.hap.locker_function = "nobody";           \
-        spin_unlock(&(_d)->arch.paging.hap.lock);                   \
-    } while (0)
-
 /************************************************/
 /*        hap domain level functions            */
 /************************************************/
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index c93a022952f226a24247354fec2d70aeebf059f3..bc88a91ffb6d562862e889cf4ef3ab6cdc005819 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <xen/config.h>
 #include <xen/list.h>
+#include <xen/spinlock.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
 
@@ -597,4 +598,12 @@ unsigned long domain_get_maximum_gpfn(struct domain *d);
 
 extern struct domain *dom_xen, *dom_io, *dom_cow;      /* for vmcoreinfo */
 
+/* Definition of an mm lock: spinlock with extra fields for debugging */
+typedef struct mm_lock {
+    spinlock_t         lock; 
+    int                unlock_level;
+    int                locker;          /* processor which holds the lock */
+    const char        *locker_function; /* func that took it */
+} mm_lock_t;
+
 #endif /* __ASM_X86_MM_H__ */
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 5542e2eac20ccd00aa7b1029260e9712d6d7265c..1f92b0c20b4c58fb3630767d83ac16e58b5f1f36 100644 (file)
@@ -189,9 +189,7 @@ typedef enum {
 /* Per-p2m-table state */
 struct p2m_domain {
     /* Lock that protects updates to the p2m */
-    spinlock_t         lock;
-    int                locker;   /* processor which holds the lock */
-    const char        *locker_function; /* Func that took it */
+    mm_lock_t          lock;
 
     /* Shadow translated domain: p2m mapping */
     pagetable_t        phys_table;
@@ -285,80 +283,6 @@ struct p2m_domain *p2m_get_p2m(struct vcpu *v);
 #define p2m_get_pagetable(p2m)  ((p2m)->phys_table)
 
 
-/*
- * The P2M lock.  This protects all updates to the p2m table.
- * Updates are expected to be safe against concurrent reads,
- * which do *not* require the lock.
- *
- * Locking discipline: always acquire this lock before the shadow or HAP one
- */
-
-#define p2m_lock_init(_p2m)                     \
-    do {                                        \
-        spin_lock_init(&(_p2m)->lock);          \
-        (_p2m)->locker = -1;                    \
-        (_p2m)->locker_function = "nobody";     \
-    } while (0)
-
-#define p2m_lock(_p2m)                                          \
-    do {                                                        \
-        if ( unlikely((_p2m)->locker == current->processor) )   \
-        {                                                       \
-            printk("Error: p2m lock held by %s\n",              \
-                   (_p2m)->locker_function);                    \
-            BUG();                                              \
-        }                                                       \
-        spin_lock(&(_p2m)->lock);                               \
-        ASSERT((_p2m)->locker == -1);                           \
-        (_p2m)->locker = current->processor;                    \
-        (_p2m)->locker_function = __func__;                     \
-    } while (0)
-
-#define p2m_unlock(_p2m)                                \
-    do {                                                \
-        ASSERT((_p2m)->locker == current->processor);   \
-        (_p2m)->locker = -1;                            \
-        (_p2m)->locker_function = "nobody";             \
-        spin_unlock(&(_p2m)->lock);                     \
-    } while (0)
-
-#define p2m_locked_by_me(_p2m)                            \
-    (current->processor == (_p2m)->locker)
-
-
-#define nestedp2m_lock_init(_domain)                                  \
-    do {                                                              \
-        spin_lock_init(&(_domain)->arch.nested_p2m_lock);             \
-        (_domain)->arch.nested_p2m_locker = -1;                       \
-        (_domain)->arch.nested_p2m_function = "nobody";               \
-    } while (0)
-
-#define nestedp2m_locked_by_me(_domain)                \
-    (current->processor == (_domain)->arch.nested_p2m_locker)
-
-#define nestedp2m_lock(_domain)                                       \
-    do {                                                              \
-        if ( nestedp2m_locked_by_me(_domain) )                        \
-        {                                                             \
-            printk("Error: p2m lock held by %s\n",                    \
-                   (_domain)->arch.nested_p2m_function);              \
-            BUG();                                                    \
-        }                                                             \
-        spin_lock(&(_domain)->arch.nested_p2m_lock);                  \
-        ASSERT((_domain)->arch.nested_p2m_locker == -1);              \
-        (_domain)->arch.nested_p2m_locker = current->processor;       \
-        (_domain)->arch.nested_p2m_function = __func__;               \
-    } while (0)
-
-#define nestedp2m_unlock(_domain)                                      \
-    do {                                                               \
-        ASSERT(nestedp2m_locked_by_me(_domain));                       \
-        (_domain)->arch.nested_p2m_locker = -1;                        \
-        (_domain)->arch.nested_p2m_function = "nobody";                \
-        spin_unlock(&(_domain)->arch.nested_p2m_lock);                 \
-    } while (0)
-
-
 /* Read a particular P2M table, mapping pages as we go.  Most callers
  * should _not_ call this directly; use the other gfn_to_mfn_* functions
  * below unless you know you want to walk a p2m that isn't a domain's