return;
ASSERT(local_irq_is_enabled());
- ASSERT(p2m_locked_by_me(p2m_get_hostp2m(d)));
/*
* Flush active cpus synchronously. Flush others the next time this domain
/************************************************/
void hap_domain_init(struct domain *d)
{
- hap_lock_init(d);
+ mm_lock_init(&d->arch.paging.hap.lock);
INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist);
}
#ifndef __HAP_PRIVATE_H__
#define __HAP_PRIVATE_H__
+#include "../mm-locks.h"
+
/********************************************/
/* GUEST TRANSLATION FUNCS */
/********************************************/
#include <asm/mem_event.h>
#include <asm/atomic.h>
+#include "mm-locks.h"
+
/* Auditing of memory sharing code? */
#define MEM_SHARING_AUDIT 0
struct list_head list;
} gfn_info_t;
-typedef struct shr_lock
-{
- spinlock_t lock; /* mem sharing lock */
- int locker; /* processor which holds the lock */
- const char *locker_function; /* func that took it */
-} shr_lock_t;
-static shr_lock_t shr_lock;
+static mm_lock_t shr_lock;
/* Returns true if list has only one entry. O(1) complexity. */
static inline int list_has_one_entry(struct list_head *head)
return list_entry(list->next, struct gfn_info, list);
}
-#define shr_lock_init(_i) \
- do { \
- spin_lock_init(&shr_lock.lock); \
- shr_lock.locker = -1; \
- shr_lock.locker_function = "nobody"; \
- } while (0)
-
-#define shr_locked_by_me(_i) \
- (current->processor == shr_lock.locker)
-
-#define shr_lock(_i) \
- do { \
- if ( unlikely(shr_lock.locker == current->processor) ) \
- { \
- printk("Error: shr lock held by %s\n", \
- shr_lock.locker_function); \
- BUG(); \
- } \
- spin_lock(&shr_lock.lock); \
- ASSERT(shr_lock.locker == -1); \
- shr_lock.locker = current->processor; \
- shr_lock.locker_function = __func__; \
- } while (0)
-
-#define shr_unlock(_i) \
- do { \
- ASSERT(shr_lock.locker == current->processor); \
- shr_lock.locker = -1; \
- shr_lock.locker_function = "nobody"; \
- spin_unlock(&shr_lock.lock); \
- } while (0)
-
static void __init mem_sharing_hash_init(void)
{
int i;
- shr_lock_init();
+ mm_lock_init(&shr_lock);
for(i=0; i<SHR_HASH_LENGTH; i++)
shr_hash[i] = NULL;
}
--- /dev/null
+/******************************************************************************
+ * arch/x86/mm/mm-locks.h
+ *
+ * Spinlocks used by the code in arch/x86/mm.
+ *
+ * Copyright (c) 2011 Citrix Systems, inc.
+ * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
+ * Copyright (c) 2006-2007 XenSource Inc.
+ * Copyright (c) 2006 Michael A Fetterman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _MM_LOCKS_H
+#define _MM_LOCKS_H
+
+/* Per-CPU variable for enforcing the lock ordering */
+DECLARE_PER_CPU(int, mm_lock_level);
+
+static inline void mm_lock_init(mm_lock_t *l)
+{
+ spin_lock_init(&l->lock);
+ l->locker = -1;
+ l->locker_function = "nobody";
+ l->unlock_level = 0;
+}
+
+static inline void _mm_lock(mm_lock_t *l, const char *func, int level)
+{
+ if ( unlikely(l->locker == current->processor) )
+ panic("mm lock held by %s\n", l->locker_function);
+ /* If you see this crash, the numbers printed are lines in this file
+ * where the offending locks are declared. */
+ if ( unlikely(this_cpu(mm_lock_level) >= level) )
+ panic("mm locking order violation: %i >= %i\n",
+ this_cpu(mm_lock_level), level);
+ spin_lock(&l->lock);
+ ASSERT(l->locker == -1);
+ l->locker = current->processor;
+ l->locker_function = func;
+ l->unlock_level = this_cpu(mm_lock_level);
+ this_cpu(mm_lock_level) = level;
+}
+/* This wrapper uses the line number of the declare_mm_lock() call to
+ * give each lock its place in the locking order below */
+#define declare_mm_lock(name) \
+ static inline void mm_lock_##name(mm_lock_t *l, const char *func) \
+ { _mm_lock(l, func, __LINE__); }
+/* This one captures the name of the calling function */
+#define mm_lock(name, l) mm_lock_##name(l, __func__)
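+
+/* For illustration (a sketch; there is no "foo" lock in this file), a
+ * hypothetical lock would be wired up as:
+ *
+ *     declare_mm_lock(foo)
+ *     #define foo_lock(l)   mm_lock(foo, l)
+ *     #define foo_unlock(l) mm_unlock(l)
+ *
+ * Its place in the ordering is the __LINE__ of its declare_mm_lock()
+ * statement, so locks declared further down this file may be taken
+ * while it is held, but not the other way round. */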
+
+static inline void mm_unlock(mm_lock_t *l)
+{
+ ASSERT(l->locker == current->processor);
+ l->locker = -1;
+ l->locker_function = "nobody";
+ this_cpu(mm_lock_level) = l->unlock_level;
+ l->unlock_level = -1;
+ spin_unlock(&l->lock);
+}
+
+static inline int mm_locked_by_me(mm_lock_t *l)
+{
+ return (current->processor == l->locker);
+}
+
+/************************************************************************
+ * *
+ * To avoid deadlocks, these locks _MUST_ be taken in the order they're *
+ * declared in this file. The locking functions will enforce this. *
+ * *
+ ************************************************************************/
+
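+/* For instance, code that already holds the shadow lock may go on to
+ * take the log-dirty lock (declared later in this file), but taking
+ * the p2m lock with the shadow lock held would trip the level check
+ * in _mm_lock() and panic, since the p2m lock is declared earlier. */
+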
+/* Page-sharing lock (global)
+ *
+ * A single global lock that protects the memory-sharing code's
+ * hash tables. */
+
+declare_mm_lock(shr)
+#define shr_lock() mm_lock(shr, &shr_lock)
+#define shr_unlock() mm_unlock(&shr_lock)
+#define shr_locked_by_me() mm_locked_by_me(&shr_lock)
+
+/* Nested P2M lock (per-domain)
+ *
+ * A per-domain lock that protects some of the nested p2m data structures.
+ * TODO: find out exactly what needs to be covered by this lock */
+
+declare_mm_lock(nestedp2m)
+#define nestedp2m_lock(d) mm_lock(nestedp2m, &(d)->arch.nested_p2m_lock)
+#define nestedp2m_unlock(d) mm_unlock(&(d)->arch.nested_p2m_lock)
+
+/* P2M lock (per-p2m-table)
+ *
+ * This protects all updates to the p2m table. Updates are expected to
+ * be safe against concurrent reads, which do *not* require the lock. */
+
+declare_mm_lock(p2m)
+#define p2m_lock(p) mm_lock(p2m, &(p)->lock)
+#define p2m_unlock(p) mm_unlock(&(p)->lock)
+#define p2m_locked_by_me(p) mm_locked_by_me(&(p)->lock)
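+
+/* For example (a sketch, not code added by this patch): a writer does
+ *
+ *     p2m_lock(p2m);
+ *     ... update an entry in the p2m table ...
+ *     p2m_unlock(p2m);
+ *
+ * while readers such as the gfn_to_mfn_*() family walk the table
+ * without taking the lock, relying on updates being made atomically. */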
+
+/* Shadow lock (per-domain)
+ *
+ * This lock is intended to allow us to make atomic updates to the
+ * software TLB that the shadow pagetables provide.
+ *
+ * Specifically, it protects:
+ * - all changes to shadow page table pages
+ * - the shadow hash table
+ * - the shadow page allocator
+ * - all changes to guest page table pages
+ * - all changes to the page_info->tlbflush_timestamp
+ * - the page_info->count fields on shadow pages */
+
+declare_mm_lock(shadow)
+#define shadow_lock(d) mm_lock(shadow, &(d)->arch.paging.shadow.lock)
+#define shadow_unlock(d) mm_unlock(&(d)->arch.paging.shadow.lock)
+#define shadow_locked_by_me(d) mm_locked_by_me(&(d)->arch.paging.shadow.lock)
+
+/* HAP lock (per-domain)
+ *
+ * Equivalent of the shadow lock for HAP. Protects updates to the
+ * NPT and EPT tables, and the HAP page allocator. */
+
+declare_mm_lock(hap)
+#define hap_lock(d) mm_lock(hap, &(d)->arch.paging.hap.lock)
+#define hap_unlock(d) mm_unlock(&(d)->arch.paging.hap.lock)
+#define hap_locked_by_me(d) mm_locked_by_me(&(d)->arch.paging.hap.lock)
+
+/* Log-dirty lock (per-domain)
+ *
+ * Protects the log-dirty bitmap from concurrent accesses (and teardowns, etc).
+ *
+ * Because mark_dirty is called from a lot of places, the log-dirty lock
+ * may be acquired with the shadow or HAP locks already held. When the
+ * log-dirty code makes callbacks into HAP or shadow code to reset
+ * various traps that will trigger the mark_dirty calls, it must *not*
+ * have the log-dirty lock held, or it risks deadlock. Because the only
+ * purpose of those calls is to make sure that *guest* actions will
+ * cause mark_dirty to be called (hypervisor actions explicitly call it
+ * anyway), it is safe to release the log-dirty lock before the callback
+ * as long as the domain is paused for the entire operation. */
+
+declare_mm_lock(log_dirty)
+#define log_dirty_lock(d) mm_lock(log_dirty, &(d)->arch.paging.log_dirty.lock)
+#define log_dirty_unlock(d) mm_unlock(&(d)->arch.paging.log_dirty.lock)
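+
+/* A sketch of the safe pattern described above (illustrative only;
+ * enable_log_dirty is the callback pointer that the paging code
+ * installs for the shadow/HAP implementation):
+ *
+ *     domain_pause(d);
+ *     log_dirty_lock(d);
+ *     ... update log-dirty state ...
+ *     log_dirty_unlock(d);               <- dropped before the callback
+ *     d->arch.paging.log_dirty.enable_log_dirty(d);
+ *     domain_unpause(d);
+ */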
+
+
+#endif /* _MM_LOCKS_H */
#include <xen/keyhandler.h>
#include <xen/softirq.h>
+#include "mm-locks.h"
+
#define atomic_read_ept_entry(__pepte) \
( (ept_entry_t) { .epte = atomic_read64(&(__pepte)->epte) } )
#define atomic_write_ept_entry(__pepte, __epte) \
#include <xen/event.h>
#include <asm/hvm/nestedhvm.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
-
+
+#include "mm-locks.h"
/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
/* After this barrier no new PoD activities can happen. */
BUG_ON(!d->is_dying);
- spin_barrier(&p2m->lock);
+ spin_barrier(&p2m->lock.lock);
spin_lock(&d->page_alloc_lock);
#include <asm/hvm/nestedhvm.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
+#include "mm-locks.h"
+
/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
#include <asm/hvm/nestedhvm.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
+#include "mm-locks.h"
+
/* turn on/off 1GB host page table support for hap, default on */
static bool_t __read_mostly opt_hap_1gb = 1;
boolean_param("hap_1gb", opt_hap_1gb);
static void p2m_initialise(struct domain *d, struct p2m_domain *p2m)
{
memset(p2m, 0, sizeof(*p2m));
- p2m_lock_init(p2m);
+ mm_lock_init(&p2m->lock);
INIT_PAGE_LIST_HEAD(&p2m->pages);
INIT_PAGE_LIST_HEAD(&p2m->pod.super);
INIT_PAGE_LIST_HEAD(&p2m->pod.single);
uint8_t i;
struct p2m_domain *p2m;
- nestedp2m_lock_init(d);
+ mm_lock_init(&d->arch.nested_p2m_lock);
for (i = 0; i < MAX_NESTEDP2M; i++) {
d->arch.nested_p2m[i] = p2m = xmalloc(struct p2m_domain);
if (p2m == NULL)
#include <xen/numa.h>
#include <xsm/xsm.h>
+#include "mm-locks.h"
+
/* Printouts */
#define PAGING_PRINTK(_f, _a...) \
debugtrace_printk("pg: %s(): " _f, __func__, ##_a)
debugtrace_printk("pgdebug: %s(): " _f, __func__, ##_a); \
} while (0)
-/************************************************/
-/* LOG DIRTY SUPPORT */
-/************************************************/
+/* Per-CPU variable for enforcing the lock ordering */
+DEFINE_PER_CPU(int, mm_lock_level);
+
/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
#undef page_to_mfn
#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
-/* The log-dirty lock. This protects the log-dirty bitmap from
- * concurrent accesses (and teardowns, etc).
- *
- * Locking discipline: always acquire shadow or HAP lock before this one.
- *
- * Because mark_dirty is called from a lot of places, the log-dirty lock
- * may be acquired with the shadow or HAP locks already held. When the
- * log-dirty code makes callbacks into HAP or shadow code to reset
- * various traps that will trigger the mark_dirty calls, it must *not*
- * have the log-dirty lock held, or it risks deadlock. Because the only
- * purpose of those calls is to make sure that *guest* actions will
- * cause mark_dirty to be called (hypervisor actions explictly call it
- * anyway), it is safe to release the log-dirty lock before the callback
- * as long as the domain is paused for the entire operation. */
-
-#define log_dirty_lock_init(_d) \
- do { \
- spin_lock_init(&(_d)->arch.paging.log_dirty.lock); \
- (_d)->arch.paging.log_dirty.locker = -1; \
- (_d)->arch.paging.log_dirty.locker_function = "nobody"; \
- } while (0)
-
-#define log_dirty_lock(_d) \
- do { \
- if (unlikely((_d)->arch.paging.log_dirty.locker==current->processor))\
- { \
- printk("Error: paging log dirty lock held by %s\n", \
- (_d)->arch.paging.log_dirty.locker_function); \
- BUG(); \
- } \
- spin_lock(&(_d)->arch.paging.log_dirty.lock); \
- ASSERT((_d)->arch.paging.log_dirty.locker == -1); \
- (_d)->arch.paging.log_dirty.locker = current->processor; \
- (_d)->arch.paging.log_dirty.locker_function = __func__; \
- } while (0)
-
-#define log_dirty_unlock(_d) \
- do { \
- ASSERT((_d)->arch.paging.log_dirty.locker == current->processor); \
- (_d)->arch.paging.log_dirty.locker = -1; \
- (_d)->arch.paging.log_dirty.locker_function = "nobody"; \
- spin_unlock(&(_d)->arch.paging.log_dirty.lock); \
- } while (0)
+/************************************************/
+/* LOG DIRTY SUPPORT */
+/************************************************/
static mfn_t paging_new_log_dirty_page(struct domain *d)
{
void (*clean_dirty_bitmap)(struct domain *d))
{
/* We initialize log dirty lock first */
- log_dirty_lock_init(d);
+ mm_lock_init(&d->arch.paging.log_dirty.lock);
d->arch.paging.log_dirty.enable_log_dirty = enable_log_dirty;
d->arch.paging.log_dirty.disable_log_dirty = disable_log_dirty;
* Called for every domain from arch_domain_create() */
void shadow_domain_init(struct domain *d, unsigned int domcr_flags)
{
- shadow_lock_init(d);
+ mm_lock_init(&d->arch.paging.shadow.lock);
INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelist);
INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
if ( unlikely(shadow_locked_by_me(d)) )
{
SHADOW_ERROR("Recursive shadow fault: lock was taken by %s\n",
- d->arch.paging.shadow.locker_function);
+ d->arch.paging.shadow.lock.locker_function);
return 0;
}
#include <asm/x86_emulate.h>
#include <asm/hvm/support.h>
+#include "../mm-locks.h"
/******************************************************************************
* Levels of self-test and paranoia
TRCE_SFLAG_OOS_FIXUP_EVICT,
};
-/******************************************************************************
- * The shadow lock.
- *
- * This lock is per-domain. It is intended to allow us to make atomic
- * updates to the software TLB that the shadow tables provide.
- *
- * Specifically, it protects:
- * - all changes to shadow page table pages
- * - the shadow hash table
- * - the shadow page allocator
- * - all changes to guest page table pages
- * - all changes to the page_info->tlbflush_timestamp
- * - the page_info->count fields on shadow pages
- * - the shadow dirty bit array and count
- */
-#ifndef CONFIG_SMP
-#error shadow.h currently requires CONFIG_SMP
-#endif
-
-#define shadow_lock_init(_d) \
- do { \
- spin_lock_init(&(_d)->arch.paging.shadow.lock); \
- (_d)->arch.paging.shadow.locker = -1; \
- (_d)->arch.paging.shadow.locker_function = "nobody"; \
- } while (0)
-
-#define shadow_locked_by_me(_d) \
- (current->processor == (_d)->arch.paging.shadow.locker)
-
-#define shadow_lock(_d) \
- do { \
- if ( unlikely((_d)->arch.paging.shadow.locker == current->processor) )\
- { \
- printk("Error: shadow lock held by %s\n", \
- (_d)->arch.paging.shadow.locker_function); \
- BUG(); \
- } \
- spin_lock(&(_d)->arch.paging.shadow.lock); \
- ASSERT((_d)->arch.paging.shadow.locker == -1); \
- (_d)->arch.paging.shadow.locker = current->processor; \
- (_d)->arch.paging.shadow.locker_function = __func__; \
- } while (0)
-
-#define shadow_unlock(_d) \
- do { \
- ASSERT((_d)->arch.paging.shadow.locker == current->processor); \
- (_d)->arch.paging.shadow.locker = -1; \
- (_d)->arch.paging.shadow.locker_function = "nobody"; \
- spin_unlock(&(_d)->arch.paging.shadow.lock); \
- } while (0)
-
/* Size (in bytes) of a guest PTE */
#if GUEST_PAGING_LEVELS >= 3
/* shadow paging extension */
/************************************************/
struct shadow_domain {
- spinlock_t lock; /* shadow domain lock */
- int locker; /* processor which holds the lock */
- const char *locker_function; /* Func that took it */
+ mm_lock_t lock; /* shadow domain lock */
+
unsigned int opt_flags; /* runtime tunable optimizations on/off */
struct page_list_head pinned_shadows;
/* hardware assisted paging */
/************************************************/
struct hap_domain {
- spinlock_t lock;
- int locker;
- const char *locker_function;
+ mm_lock_t lock;
struct page_list_head freelist;
unsigned int total_pages; /* number of pages allocated */
/************************************************/
struct log_dirty_domain {
/* log-dirty lock */
- spinlock_t lock;
- int locker; /* processor that holds the lock */
- const char *locker_function; /* func that took it */
+ mm_lock_t lock;
/* log-dirty radix tree to record dirty pages */
mfn_t top;
/* nestedhvm: translate l2 guest physical to host physical */
struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
- spinlock_t nested_p2m_lock;
- int nested_p2m_locker;
- const char *nested_p2m_function;
+ mm_lock_t nested_p2m_lock;
/* NB. protected by d->event_lock and by irq_desc[irq].lock */
struct radix_tree_root irq_pirq;
unmap_domain_page(p);
}
-/************************************************/
-/* locking for hap code */
-/************************************************/
-#define hap_lock_init(_d) \
- do { \
- spin_lock_init(&(_d)->arch.paging.hap.lock); \
- (_d)->arch.paging.hap.locker = -1; \
- (_d)->arch.paging.hap.locker_function = "nobody"; \
- } while (0)
-
-#define hap_locked_by_me(_d) \
- (current->processor == (_d)->arch.paging.hap.locker)
-
-#define hap_lock(_d) \
- do { \
- if ( unlikely((_d)->arch.paging.hap.locker == current->processor) )\
- { \
- printk("Error: hap lock held by %s\n", \
- (_d)->arch.paging.hap.locker_function); \
- BUG(); \
- } \
- spin_lock(&(_d)->arch.paging.hap.lock); \
- ASSERT((_d)->arch.paging.hap.locker == -1); \
- (_d)->arch.paging.hap.locker = current->processor; \
- (_d)->arch.paging.hap.locker_function = __func__; \
- } while (0)
-
-#define hap_unlock(_d) \
- do { \
- ASSERT((_d)->arch.paging.hap.locker == current->processor); \
- (_d)->arch.paging.hap.locker = -1; \
- (_d)->arch.paging.hap.locker_function = "nobody"; \
- spin_unlock(&(_d)->arch.paging.hap.lock); \
- } while (0)
-
/************************************************/
/* hap domain level functions */
/************************************************/
#include <xen/config.h>
#include <xen/list.h>
+#include <xen/spinlock.h>
#include <asm/io.h>
#include <asm/uaccess.h>
extern struct domain *dom_xen, *dom_io, *dom_cow; /* for vmcoreinfo */
+/* Definition of an mm lock: spinlock with extra fields for debugging */
+typedef struct mm_lock {
+ spinlock_t lock;
+    int unlock_level;            /* lock level to restore on release */
+ int locker; /* processor which holds the lock */
+ const char *locker_function; /* func that took it */
+} mm_lock_t;
+
#endif /* __ASM_X86_MM_H__ */
/* Per-p2m-table state */
struct p2m_domain {
/* Lock that protects updates to the p2m */
- spinlock_t lock;
- int locker; /* processor which holds the lock */
- const char *locker_function; /* Func that took it */
+ mm_lock_t lock;
/* Shadow translated domain: p2m mapping */
pagetable_t phys_table;
#define p2m_get_pagetable(p2m) ((p2m)->phys_table)
-/*
- * The P2M lock. This protects all updates to the p2m table.
- * Updates are expected to be safe against concurrent reads,
- * which do *not* require the lock.
- *
- * Locking discipline: always acquire this lock before the shadow or HAP one
- */
-
-#define p2m_lock_init(_p2m) \
- do { \
- spin_lock_init(&(_p2m)->lock); \
- (_p2m)->locker = -1; \
- (_p2m)->locker_function = "nobody"; \
- } while (0)
-
-#define p2m_lock(_p2m) \
- do { \
- if ( unlikely((_p2m)->locker == current->processor) ) \
- { \
- printk("Error: p2m lock held by %s\n", \
- (_p2m)->locker_function); \
- BUG(); \
- } \
- spin_lock(&(_p2m)->lock); \
- ASSERT((_p2m)->locker == -1); \
- (_p2m)->locker = current->processor; \
- (_p2m)->locker_function = __func__; \
- } while (0)
-
-#define p2m_unlock(_p2m) \
- do { \
- ASSERT((_p2m)->locker == current->processor); \
- (_p2m)->locker = -1; \
- (_p2m)->locker_function = "nobody"; \
- spin_unlock(&(_p2m)->lock); \
- } while (0)
-
-#define p2m_locked_by_me(_p2m) \
- (current->processor == (_p2m)->locker)
-
-
-#define nestedp2m_lock_init(_domain) \
- do { \
- spin_lock_init(&(_domain)->arch.nested_p2m_lock); \
- (_domain)->arch.nested_p2m_locker = -1; \
- (_domain)->arch.nested_p2m_function = "nobody"; \
- } while (0)
-
-#define nestedp2m_locked_by_me(_domain) \
- (current->processor == (_domain)->arch.nested_p2m_locker)
-
-#define nestedp2m_lock(_domain) \
- do { \
- if ( nestedp2m_locked_by_me(_domain) ) \
- { \
- printk("Error: p2m lock held by %s\n", \
- (_domain)->arch.nested_p2m_function); \
- BUG(); \
- } \
- spin_lock(&(_domain)->arch.nested_p2m_lock); \
- ASSERT((_domain)->arch.nested_p2m_locker == -1); \
- (_domain)->arch.nested_p2m_locker = current->processor; \
- (_domain)->arch.nested_p2m_function = __func__; \
- } while (0)
-
-#define nestedp2m_unlock(_domain) \
- do { \
- ASSERT(nestedp2m_locked_by_me(_domain)); \
- (_domain)->arch.nested_p2m_locker = -1; \
- (_domain)->arch.nested_p2m_function = "nobody"; \
- spin_unlock(&(_domain)->arch.nested_p2m_lock); \
- } while (0)
-
-
/* Read a particular P2M table, mapping pages as we go. Most callers
* should _not_ call this directly; use the other gfn_to_mfn_* functions
* below unless you know you want to walk a p2m that isn't a domain's