static struct page_info *hap_alloc_p2m_page(struct domain *d)
{
struct page_info *pg;
- int do_locking;
/* This is called both from the p2m code (which never holds the
* hap lock) and the log-dirty code (which sometimes does). */
- do_locking = !hap_locked_by_me(d);
- if ( do_locking )
- hap_lock(d);
+ hap_lock_recursive(d);
pg = hap_alloc(d);
#if CONFIG_PAGING_LEVELS == 3
pg->count_info |= 1;
}
- if ( do_locking )
- hap_unlock(d);
+ hap_unlock(d);
return pg;
}
static void hap_free_p2m_page(struct domain *d, struct page_info *pg)
{
- int do_locking;
-
/* This is called both from the p2m code (which never holds the
* hap lock) and the log-dirty code (which sometimes does). */
- do_locking = !hap_locked_by_me(d);
- if ( do_locking )
- hap_lock(d);
+ hap_lock_recursive(d);
ASSERT(page_get_owner(pg) == d);
/* Should have just the one ref we gave it in alloc_p2m_page() */
hap_free(d, page_to_mfn(pg));
ASSERT(d->arch.paging.hap.p2m_pages >= 0);
- if ( do_locking )
- hap_unlock(d);
+ hap_unlock(d);
}
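/* Minimal sketch (toy single-CPU lock, not Xen's mm_lock_t) of the pattern the
 * hunks above apply: instead of asking "do I already hold the lock?" and
 * conditionally taking it, the callee takes a recursive lock unconditionally
 * and releases it once, which works from both the p2m path (lock not held)
 * and the log-dirty path (lock already held). */
#include <assert.h>
#include <stdio.h>

static int this_cpu = 0;          /* stand-in for current->processor */
static int owner = -1;            /* -1 means unlocked */
static int depth;                 /* recursion depth */

static void lock_recursive(void)
{
    assert(owner == -1 || owner == this_cpu);  /* real code would spin here */
    owner = this_cpu;
    depth++;
}

static void unlock(void)
{
    assert(owner == this_cpu && depth > 0);
    if ( --depth == 0 )
        owner = -1;
}

static void alloc_p2m_page_like(void)      /* shape of hap_alloc_p2m_page */
{
    lock_recursive();
    /* ... allocate and account the page ... */
    unlock();
}

int main(void)
{
    alloc_p2m_page_like();        /* p2m caller: lock not held */

    lock_recursive();             /* log-dirty caller: lock already held */
    alloc_p2m_page_like();        /* still safe: just nests one level deeper */
    unlock();

    printf("depth %d, owner %d\n", depth, owner);
    return 0;
}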
/* Return the size of the pool, rounded up to the nearest MB */
l->unlock_level = 0;
}
-static inline void _mm_lock(mm_lock_t *l, const char *func, int level)
+static inline int mm_locked_by_me(mm_lock_t *l)
+{
+ return (l->lock.recurse_cpu == current->processor);
+}
+
+static inline void _mm_lock(mm_lock_t *l, const char *func, int level, int rec)
{
- if ( unlikely(l->locker == current->processor) )
- panic("mm lock held by %s\n", l->locker_function);
/* If you see this crash, the numbers printed are lines in this file
* where the offending locks are declared. */
- if ( unlikely(this_cpu(mm_lock_level) >= level) )
- panic("mm locking order violation: %i >= %i\n",
+ if ( unlikely(this_cpu(mm_lock_level) > level) )
+ panic("mm locking order violation: %i > %i\n",
this_cpu(mm_lock_level), level);
- spin_lock(&l->lock);
- ASSERT(l->locker == -1);
- l->locker = current->processor;
- l->locker_function = func;
- l->unlock_level = this_cpu(mm_lock_level);
+ spin_lock_recursive(&l->lock);
+ if ( l->lock.recurse_cnt == 1 )
+ {
+ l->locker_function = func;
+ l->unlock_level = this_cpu(mm_lock_level);
+ }
+ else if ( unlikely(!rec) )
+ panic("mm lock already held by %s\n", l->locker_function);
this_cpu(mm_lock_level) = level;
}
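/* Self-contained sketch (toy types, single-threaded) of the checks _mm_lock
 * now performs: a per-CPU "current lock level" enforces the ordering, the
 * outermost acquisition (depth 1) records the level to restore on release,
 * and a nested acquisition is only tolerated when the caller allowed
 * recursion.  Re-taking the same lock happens at the same level, which is
 * why the ordering check above relaxes from ">=" to ">".  spin_lock_recursive
 * itself is modelled by a plain owner/depth pair; the real lock also spins
 * against other CPUs. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_lock {
    int owner;                 /* like lock.recurse_cpu, -1 when free */
    int depth;                 /* like lock.recurse_cnt */
    int unlock_level;          /* level to restore on the outermost unlock */
};

static int this_cpu = 0;
static int cpu_lock_level = -1;    /* like this_cpu(mm_lock_level) */

static void demo_lock(struct demo_lock *l, int level, int rec)
{
    if ( cpu_lock_level > level )
    {
        fprintf(stderr, "ordering violation: %d > %d\n", cpu_lock_level, level);
        exit(1);
    }
    assert(l->owner == -1 || l->owner == this_cpu);   /* would spin otherwise */
    l->owner = this_cpu;
    if ( ++l->depth == 1 )
        l->unlock_level = cpu_lock_level;             /* outermost: save level */
    else if ( !rec )
    {
        fprintf(stderr, "lock already held and recursion not allowed\n");
        exit(1);
    }
    cpu_lock_level = level;
}

static void demo_unlock(struct demo_lock *l)
{
    assert(l->owner == this_cpu && l->depth > 0);
    if ( l->depth == 1 )
        cpu_lock_level = l->unlock_level;             /* outermost: restore */
    if ( --l->depth == 0 )
        l->owner = -1;
}

int main(void)
{
    struct demo_lock p2m = { -1, 0, 0 }, hap = { -1, 0, 0 };

    demo_lock(&p2m, 120, 0);       /* levels stand in for __LINE__ values */
    demo_lock(&hap, 140, 0);       /* higher level: ordering check passes  */
    demo_lock(&hap, 140, 1);       /* same lock again, recursion allowed   */
    demo_unlock(&hap);
    demo_unlock(&hap);
    demo_unlock(&p2m);
    printf("final level %d\n", cpu_lock_level);       /* back to -1 */
    return 0;
}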
/* This wrapper uses the line number to express the locking order below */
-#define declare_mm_lock(name) \
- static inline void mm_lock_##name(mm_lock_t *l, const char *func) \
- { _mm_lock(l, func, __LINE__); }
-/* This one captures the name of the calling function */
-#define mm_lock(name, l) mm_lock_##name(l, __func__)
+#define declare_mm_lock(name) \
+ static inline void mm_lock_##name(mm_lock_t *l, const char *func, int rec)\
+ { _mm_lock(l, func, __LINE__, rec); }
+/* These capture the name of the calling function */
+#define mm_lock(name, l) mm_lock_##name(l, __func__, 0)
+#define mm_lock_recursive(name, l) mm_lock_##name(l, __func__, 1)
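/* Illustrative sketch (hypothetical names, not the real header) of the
 * __LINE__ trick used by declare_mm_lock: __LINE__ expands at the invocation
 * site, so each lock's level is simply the line in the header where its
 * wrapper is declared.  Declaration order in the file therefore *is* the
 * permitted acquisition order that _mm_lock enforces. */
#include <stdio.h>

#define declare_demo_lock(name)                                   \
    static inline int demo_lock_level_##name(void)                \
    { return __LINE__; }     /* __LINE__ is taken at the invocation site */

declare_demo_lock(p2m)      /* lower line number: must be taken first   */
declare_demo_lock(shadow)   /* higher line number: may be taken after   */

int main(void)
{
    printf("p2m level %d, shadow level %d\n",
           demo_lock_level_p2m(), demo_lock_level_shadow());
    return 0;
}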
static inline void mm_unlock(mm_lock_t *l)
{
- ASSERT(l->locker == current->processor);
- l->locker = -1;
- l->locker_function = "nobody";
- this_cpu(mm_lock_level) = l->unlock_level;
- l->unlock_level = -1;
- spin_unlock(&l->lock);
-}
-
-static inline int mm_locked_by_me(mm_lock_t *l)
-{
- return (current->processor == l->locker);
+ if ( l->lock.recurse_cnt == 1 )
+ {
+ l->locker_function = "nobody";
+ this_cpu(mm_lock_level) = l->unlock_level;
+ }
+ spin_unlock_recursive(&l->lock);
}
/************************************************************************
* be safe against concurrent reads, which do *not* require the lock. */
declare_mm_lock(p2m)
-#define p2m_lock(p) mm_lock(p2m, &(p)->lock)
-#define p2m_unlock(p) mm_unlock(&(p)->lock)
-#define p2m_locked_by_me(p) mm_locked_by_me(&(p)->lock)
+#define p2m_lock(p) mm_lock(p2m, &(p)->lock)
+#define p2m_lock_recursive(p) mm_lock_recursive(p2m, &(p)->lock)
+#define p2m_unlock(p) mm_unlock(&(p)->lock)
+#define p2m_locked_by_me(p) mm_locked_by_me(&(p)->lock)
/* Shadow lock (per-domain)
*
declare_mm_lock(shadow)
#define shadow_lock(d) mm_lock(shadow, &(d)->arch.paging.shadow.lock)
+#define shadow_lock_recursive(d) \
+ mm_lock_recursive(shadow, &(d)->arch.paging.shadow.lock)
#define shadow_unlock(d) mm_unlock(&(d)->arch.paging.shadow.lock)
#define shadow_locked_by_me(d) mm_locked_by_me(&(d)->arch.paging.shadow.lock)
declare_mm_lock(hap)
#define hap_lock(d) mm_lock(hap, &(d)->arch.paging.hap.lock)
+#define hap_lock_recursive(d) \
+ mm_lock_recursive(hap, &(d)->arch.paging.hap.lock)
#define hap_unlock(d) mm_unlock(&(d)->arch.paging.hap.lock)
#define hap_locked_by_me(d) mm_locked_by_me(&(d)->arch.paging.hap.lock)
ept_entry_t *entry, int order,
p2m_query_t q)
{
- /* Only take the lock if we don't already have it. Otherwise it
- * wouldn't be safe to do p2m lookups with the p2m lock held */
- int do_locking = !p2m_locked_by_me(p2m);
int r;
- if ( do_locking )
- p2m_lock(p2m);
+ /* This is called from the p2m lookups, which can happen with or
+ * without the lock held. */
+ p2m_lock_recursive(p2m);
/* Check to make sure this is still PoD */
if ( entry->sa_p2mt != p2m_populate_on_demand )
{
- if ( do_locking )
- p2m_unlock(p2m);
+ p2m_unlock(p2m);
return 0;
}
r = p2m_pod_demand_populate(p2m, gfn, order, q);
- if ( do_locking )
- p2m_unlock(p2m);
+ p2m_unlock(p2m);
return r;
}
l1_pgentry_t *p2m_entry, int order,
p2m_query_t q)
{
- /* Only take the lock if we don't already have it. Otherwise it
- * wouldn't be safe to do p2m lookups with the p2m lock held */
- int do_locking = !p2m_locked_by_me(p2m);
int r;
- if ( do_locking )
- p2m_lock(p2m);
-
+ /* This is called from the p2m lookups, which can happen with or
+ * without the lock held. */
+ p2m_lock_recursive(p2m);
audit_p2m(p2m, 1);
/* Check to make sure this is still PoD */
if ( p2m_flags_to_type(l1e_get_flags(*p2m_entry)) != p2m_populate_on_demand )
{
- if ( do_locking )
- p2m_unlock(p2m);
+ p2m_unlock(p2m);
return 0;
}
r = p2m_pod_demand_populate(p2m, gfn, order, q);
audit_p2m(p2m, 1);
- if ( do_locking )
- p2m_unlock(p2m);
+ p2m_unlock(p2m);
return r;
}
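/* Sketch (not Xen code) of the demand-populate shape above, using a POSIX
 * recursive mutex in place of the p2m mm-lock; the names and the
 * entry_is_pod flag are toy stand-ins.  The function can be entered with or
 * without the mutex held, takes it unconditionally, and must release it on
 * every exit path, including the early "no longer PoD" return. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t p2m_mutex;
static int entry_is_pod = 1;      /* toy stand-in for the PoD type check */

static int demand_populate(unsigned long gfn)
{
    int r;

    pthread_mutex_lock(&p2m_mutex);       /* recursive: safe either way */
    if ( !entry_is_pod )
    {
        pthread_mutex_unlock(&p2m_mutex); /* balance before early return */
        return 0;
    }
    r = 1;                                /* pretend we populated gfn */
    entry_is_pod = 0;
    pthread_mutex_unlock(&p2m_mutex);
    return r;
}

int main(void)
{
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&p2m_mutex, &attr);

    /* Caller that already holds the lock (lookup with the lock held). */
    pthread_mutex_lock(&p2m_mutex);
    printf("populated: %d\n", demand_populate(0x1000));
    pthread_mutex_unlock(&p2m_mutex);

    /* Caller without the lock. */
    printf("populated: %d\n", demand_populate(0x1000));
    return 0;
}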
* will be made safe (TLB flush semantics); pages unsynced by this vcpu
* are brought back into sync and write-protected. If skip != 0, we try
* to avoid resyncing at all if we think we can get away with it. */
-void sh_resync_all(struct vcpu *v, int skip, int this, int others, int do_locking)
+void sh_resync_all(struct vcpu *v, int skip, int this, int others)
{
int idx;
struct vcpu *other;
SHADOW_PRINTK("d=%d, v=%d\n", v->domain->domain_id, v->vcpu_id);
- ASSERT(do_locking || shadow_locked_by_me(v->domain));
+ ASSERT(shadow_locked_by_me(v->domain));
if ( !this )
goto resync_others;
- if ( do_locking )
- shadow_lock(v->domain);
-
/* First: resync all of this vcpu's oos pages */
for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
if ( mfn_valid(oos[idx]) )
oos[idx] = _mfn(INVALID_MFN);
}
- if ( do_locking )
- shadow_unlock(v->domain);
-
resync_others:
if ( !others )
return;
if ( v == other )
continue;
- if ( do_locking )
- shadow_lock(v->domain);
-
oos = other->arch.paging.shadow.oos;
oos_fixup = other->arch.paging.shadow.oos_fixup;
oos_snapshot = other->arch.paging.shadow.oos_snapshot;
_sh_resync(other, oos[idx], &oos_fixup[idx], oos_snapshot[idx]);
oos[idx] = _mfn(INVALID_MFN);
}
- }
-
- if ( do_locking )
- shadow_unlock(v->domain);
+ }
}
}
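/* Short sketch (toy lock, hypothetical names) of the reworked contract: with
 * the do_locking parameter gone, a sh_resync_all-style callee just ASSERTs
 * that its caller holds the lock, and callers such as sh_update_cr3 bracket
 * the call themselves, recursively if they might already hold it. */
#include <assert.h>
#include <stdio.h>

static int shadow_lock_depth;               /* single-threaded stand-in */

static int  locked_by_me(void)   { return shadow_lock_depth > 0; }
static void lock_recursive(void) { shadow_lock_depth++; }
static void unlock(void)         { assert(locked_by_me()); shadow_lock_depth--; }

static void resync_all(void)                /* callee: no locking decisions */
{
    assert(locked_by_me());
    /* ... bring out-of-sync shadows back into sync ... */
}

static void update_cr3(void)                /* caller brackets the call */
{
    lock_recursive();
    resync_all();
    /* ... install the new shadow tables ... */
    unlock();
}

int main(void)
{
    update_cr3();
    printf("lock depth back to %d\n", shadow_lock_depth);
    return 0;
}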
shadow_alloc_p2m_page(struct domain *d)
{
struct page_info *pg;
- int do_locking;
/* This is called both from the p2m code (which never holds the
* shadow lock) and the log-dirty code (which sometimes does). */
- do_locking = !shadow_locked_by_me(d);
- if ( do_locking )
- shadow_lock(d);
+ shadow_lock_recursive(d);
if ( d->arch.paging.shadow.total_pages
< shadow_min_acceptable_pages(d) + 1 )
{
- if ( do_locking )
- shadow_unlock(d);
+ shadow_unlock(d);
return NULL;
}
d->arch.paging.shadow.p2m_pages++;
d->arch.paging.shadow.total_pages--;
- if ( do_locking )
- shadow_unlock(d);
+ shadow_unlock(d);
/* Unlike shadow pages, mark p2m pages as owned by the domain.
* Marking the domain as the owner would normally allow the guest to
static void
shadow_free_p2m_page(struct domain *d, struct page_info *pg)
{
- int do_locking;
-
ASSERT(page_get_owner(pg) == d);
/* Should have just the one ref we gave it in alloc_p2m_page() */
if ( (pg->count_info & PGC_count_mask) != 1 )
/* This is called both from the p2m code (which never holds the
* shadow lock) and the log-dirty code (which sometimes does). */
- do_locking = !shadow_locked_by_me(d);
- if ( do_locking )
- shadow_lock(d);
+ shadow_lock_recursive(d);
shadow_free(d, page_to_mfn(pg));
d->arch.paging.shadow.p2m_pages--;
d->arch.paging.shadow.total_pages++;
- if ( do_locking )
- shadow_unlock(d);
+ shadow_unlock(d);
}
#if CONFIG_PAGING_LEVELS == 3
int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn)
{
struct page_info *page = mfn_to_page(gmfn);
- int expected_count, do_locking;
+ int expected_count;
/* Dispatch table for getting per-type functions */
static const hash_callback_t callbacks[SH_type_unused] = {
/* Although this is an externally visible function, we do not know
* whether the shadow lock will be held when it is called (since it
- * can be called via put_page_type when we clear a shadow l1e).
- * If the lock isn't held, take it for the duration of the call. */
- do_locking = !shadow_locked_by_me(v->domain);
- if ( do_locking ) shadow_lock(v->domain);
+ * can be called via put_page_type when we clear a shadow l1e). */
+ shadow_lock_recursive(v->domain);
/* XXX TODO:
* Heuristics for finding the (probably) single mapping of this gmfn */
}
}
- if ( do_locking ) shadow_unlock(v->domain);
+ shadow_unlock(v->domain);
/* We killed at least one mapping, so must flush TLBs. */
return 1;
{
struct page_info *pg = mfn_to_page(gmfn);
mfn_t smfn;
- int do_locking;
unsigned char t;
/* Dispatch table for getting per-type functions: each level must
/* Although this is an externally visible function, we do not know
* whether the shadow lock will be held when it is called (since it
- * can be called via put_page_type when we clear a shadow l1e).
- * If the lock isn't held, take it for the duration of the call. */
- do_locking = !shadow_locked_by_me(v->domain);
- if ( do_locking ) shadow_lock(v->domain);
+ * can be called via put_page_type when we clear a shadow l1e). */
+ shadow_lock_recursive(v->domain);
SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx\n",
v->domain->domain_id, v->vcpu_id, mfn_x(gmfn));
/* Bail out now if the page is not shadowed */
if ( (pg->count_info & PGC_page_table) == 0 )
{
- if ( do_locking ) shadow_unlock(v->domain);
+ shadow_unlock(v->domain);
return;
}
* take a fault. */
flush_tlb_mask(v->domain->domain_dirty_cpumask);
- if ( do_locking ) shadow_unlock(v->domain);
+ shadow_unlock(v->domain);
}
static void
/* Need to resync all our pages now, because if a page goes out
* of sync with paging enabled and is resynced with paging
* disabled, the resync will go wrong. */
- shadow_resync_all(v, 0);
+ shadow_resync_all(v);
#endif /* OOS */
if ( !hvm_paging_enabled(v) )
ASSERT(d->is_dying);
ASSERT(d != current->domain);
- if ( !shadow_locked_by_me(d) )
- shadow_lock(d); /* Keep various asserts happy */
+ shadow_lock(d);
if ( shadow_mode_enabled(d) )
{
/* All pages walked are now pagetables. Safe to resync pages
in case level 4 or 3 shadows were set. */
if ( resync )
- shadow_resync_all(v, 0);
+ shadow_resync_all(v);
#endif
/* Now follow it down a level. Guaranteed to succeed. */
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC )
if ( mfn_valid(sl3mfn) )
- shadow_resync_all(v, 0);
+ shadow_resync_all(v);
#endif
}
l4e_propagate_from_guest(v, new_gl4e, sl3mfn, &new_sl4e, ft_prefetch);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC )
if ( mfn_valid(sl2mfn) )
- shadow_resync_all(v, 0);
+ shadow_resync_all(v);
#endif
}
l3e_propagate_from_guest(v, new_gl3e, sl2mfn, &new_sl3e, ft_prefetch);
return;
}
+ if ( do_locking ) shadow_lock(v->domain);
+
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Need to resync all the shadow entries on a TLB flush. Resync
* current vcpus OOS pages before switching to the new shadow
* tables so that the VA hint is still valid. */
- shadow_resync_current_vcpu(v, do_locking);
+ shadow_resync_current_vcpu(v);
#endif
- if ( do_locking ) shadow_lock(v->domain);
-
ASSERT(shadow_locked_by_me(v->domain));
ASSERT(v->arch.paging.mode);
v->arch.paging.last_write_emul_ok = 0;
#endif
- /* Release the lock, if we took it (otherwise it's the caller's problem) */
- if ( do_locking ) shadow_unlock(v->domain);
-
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Need to resync all the shadow entries on a TLB flush. We only
* update the shadows, leaving the pages out of sync. Also, we try
* to skip synchronization of shadows not mapped in the new
* tables. */
- shadow_sync_other_vcpus(v, do_locking);
+ shadow_sync_other_vcpus(v);
#endif
+ /* Release the lock, if we took it (otherwise it's the caller's problem) */
+ if ( do_locking ) shadow_unlock(v->domain);
}
/* Pull all out-of-sync shadows back into sync. If skip != 0, we try
* to avoid resyncing where we think we can get away with it. */
-void sh_resync_all(struct vcpu *v, int skip, int this, int others, int do_locking);
+void sh_resync_all(struct vcpu *v, int skip, int this, int others);
static inline void
-shadow_resync_all(struct vcpu *v, int do_locking)
+shadow_resync_all(struct vcpu *v)
{
- sh_resync_all(v,
- 0 /* skip */,
- 1 /* this */,
- 1 /* others */,
- do_locking);
+ sh_resync_all(v, 0 /* skip */, 1 /* this */, 1 /* others */);
}
static inline void
-shadow_resync_current_vcpu(struct vcpu *v, int do_locking)
+shadow_resync_current_vcpu(struct vcpu *v)
{
- sh_resync_all(v,
- 0 /* skip */,
- 1 /* this */,
- 0 /* others */,
- do_locking);
+ sh_resync_all(v, 0 /* skip */, 1 /* this */, 0 /* others */);
}
static inline void
-shadow_sync_other_vcpus(struct vcpu *v, int do_locking)
+shadow_sync_other_vcpus(struct vcpu *v)
{
- sh_resync_all(v,
- 1 /* skip */,
- 0 /* this */,
- 1 /* others */,
- do_locking);
+ sh_resync_all(v, 1 /* skip */, 0 /* this */, 1 /* others */);
}
void oos_audit_hash_is_present(struct domain *d, mfn_t gmfn);