#include <asm/p2m.h>
#include <asm/mem_event.h>
#include <asm/atomic.h>
+#include <xen/rcupdate.h>
#include "mm-locks.h"
#if MEM_SHARING_AUDIT
-static mm_lock_t shr_lock;
-
-#define shr_lock() _shr_lock()
-#define shr_unlock() _shr_unlock()
-#define shr_locked_by_me() _shr_locked_by_me()
-
static void mem_sharing_audit(void);
#define MEM_SHARING_DEBUG(_f, _a...) \
debugtrace_printk("mem_sharing_debug: %s(): " _f, __func__, ##_a)
static struct list_head shr_audit_list;
+static spinlock_t shr_audit_lock;
+DEFINE_RCU_READ_LOCK(shr_audit_read_lock);
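+
+/*
+ * Audit list concurrency scheme: shr_audit_lock serializes writers,
+ * readers walk the list under RCU, and entries are freed only after
+ * an RCU grace period has elapsed (see call_rcu below).
+ */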
-static inline void audit_add_list(struct page_info *page)
+/* RCU delayed free of audit list entry */
+static void _free_pg_shared_info(struct rcu_head *head)
{
- INIT_LIST_HEAD(&page->shared_info->entry);
- list_add(&page->shared_info->entry, &shr_audit_list);
+ xfree(container_of(head, struct page_sharing_info, rcu_head));
}
-static inline void audit_del_list(struct page_info *page)
+static inline void audit_add_list(struct page_info *page)
{
- list_del(&page->shared_info->entry);
+ INIT_LIST_HEAD(&page->shared_info->entry);
+ spin_lock(&shr_audit_lock);
+ list_add_rcu(&page->shared_info->entry, &shr_audit_list);
+ spin_unlock(&shr_audit_lock);
}
-static inline int mem_sharing_page_lock(struct page_info *p)
+static inline void audit_del_list(struct page_info *page)
{
- return 1;
+ spin_lock(&shr_audit_lock);
+ list_del_rcu(&page->shared_info->entry);
+ spin_unlock(&shr_audit_lock);
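+ /* Defer the free: an auditor may still be walking this entry under RCU. */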
+ INIT_RCU_HEAD(&page->shared_info->rcu_head);
+ call_rcu(&page->shared_info->rcu_head, _free_pg_shared_info);
}
-#define mem_sharing_page_unlock(p) ((void)0)
-#define get_next_handle() next_handle++;
#else
-#define shr_lock() ((void)0)
-#define shr_unlock() ((void)0)
-/* Only used inside audit code */
-//#define shr_locked_by_me() ((void)0)
-
#define mem_sharing_audit() ((void)0)
#define audit_add_list(p) ((void)0)
-#define audit_del_list(p) ((void)0)
+static inline void audit_del_list(struct page_info *page)
+{
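+ /* No audit list and thus no RCU readers in this configuration:
+  * the entry can be freed immediately. */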
+ xfree(page->shared_info);
+}
+
+#endif /* MEM_SHARING_AUDIT */
static inline int mem_sharing_page_lock(struct page_info *pg)
{
    /* Per-page locking relies on the generic page_lock() facility */
    return page_lock(pg);
}

static inline shr_handle_t get_next_handle(void)
{
    /* Get the next handle get_page_and_type()ed */
    uint64_t x, y = next_handle;
    do {
        x = y;
    }
    while ( (y = cmpxchg(&next_handle, x, x + 1)) != x );
    return x + 1;
}
-#endif /* MEM_SHARING_AUDIT */
#define mem_sharing_enabled(d) \
(is_hvm_domain(d) && (d)->arch.hvm_domain.mem_sharing_enabled)
unsigned long count_found = 0;
struct list_head *ae;
- ASSERT(shr_locked_by_me());
count_expected = atomic_read(&nr_shared_mfns);
- list_for_each(ae, &shr_audit_list)
+ rcu_read_lock(&shr_audit_read_lock);
+
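+ /* Traverse under RCU: safe against concurrent audit_add_list and
+  * audit_del_list updates. */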
+ list_for_each_rcu(ae, &shr_audit_list)
{
struct page_sharing_info *shared_info;
unsigned long nr_gfns = 0;
shared_info = list_entry(ae, struct page_sharing_info, entry);
pg = shared_info->pg;
mfn = page_to_mfn(pg);
+ /* If we can't lock it, it's definitely not a shared page */
+ if ( !mem_sharing_page_lock(pg) )
+ {
+ MEM_SHARING_DEBUG("mfn %lx in audit list, but cannot be locked (%lx)!\n",
+ mfn_x(mfn), pg->u.inuse.type_info);
+ errors++;
+ continue;
+ }
+
/* Check if the MFN has correct type, owner and handle. */
if ( !(pg->u.inuse.type_info & PGT_shared_page) )
{
put_domain(d);
nr_gfns++;
}
- if ( nr_gfns != (pg->u.inuse.type_info & PGT_count_mask) )
+ /* The type count has an extra ref because we have locked the page */
+ if ( (nr_gfns + 1) != (pg->u.inuse.type_info & PGT_count_mask) )
{
MEM_SHARING_DEBUG("Mismatched counts for MFN=%lx."
"nr_gfns in list %lu, in type_info %lx\n",
(pg->u.inuse.type_info & PGT_count_mask));
errors++;
}
+
+ mem_sharing_page_unlock(pg);
}
+ rcu_read_unlock(&shr_audit_read_lock);
+
if ( count_found != count_expected )
{
MEM_SHARING_DEBUG("Expected %ld shared mfns, found %ld.",
spin_lock(&d->page_alloc_lock);
/* We can only change the type if count is one */
- /* If we are locking pages individually, then we need to drop
+ /* Because we are locking pages individually, we need to drop
* the lock here, while the page is typed. We cannot risk the
* race of page_unlock and then put_page_type. */
-#if MEM_SHARING_AUDIT
- expected_type = (PGT_shared_page | PGT_validated | 1);
-#else
expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
-#endif
if ( page->u.inuse.type_info != expected_type )
{
put_page(page);
/* Drop the final typecount */
put_page_and_type(page);
-#ifndef MEM_SHARING_AUDIT
/* Now that we've dropped the type, we can unlock */
mem_sharing_page_unlock(page);
-#endif
/* Change the owner */
ASSERT(page_get_owner(page) == dom_cow);
*phandle = 0UL;
- shr_lock();
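+ /* The global shr_lock is gone; the gfn lock taken by get_gfn below
+  * serializes nomination attempts on this gfn. */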
mfn = get_gfn(d, gfn, &p2mt);
/* Check if mfn is valid */
out:
put_gfn(d, gfn);
- shr_unlock();
return ret;
}
mfn_t smfn, cmfn;
p2m_type_t smfn_type, cmfn_type;
- shr_lock();
-
/* XXX if sd == cd handle potential deadlock by ordering
* the get_ and put_gfn's */
smfn = get_gfn(sd, sgfn, &smfn_type);
/* Clear the rest of the shared state */
audit_del_list(cpage);
- xfree(cpage->shared_info);
cpage->shared_info = NULL;
mem_sharing_page_unlock(secondpg);
err_out:
put_gfn(cd, cgfn);
put_gfn(sd, sgfn);
- shr_unlock();
return ret;
}
gfn_info_t *gfn_info = NULL;
struct list_head *le;
- /* This is one of the reasons why we can't enforce ordering
- * between shr_lock and p2m fine-grained locks in mm-lock.
- * Callers may walk in here already holding the lock for this gfn */
- shr_lock();
mem_sharing_audit();
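+ /* The gfn lock (via get_gfn) plus the per-page lock replace the old
+  * global shr_lock for unsharing. */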
mfn = get_gfn(d, gfn, &p2mt);
/* Has someone already unshared it? */
if ( !p2m_is_shared(p2mt) ) {
put_gfn(d, gfn);
- shr_unlock();
return 0;
}
{
/* Clean up shared state */
audit_del_list(page);
- xfree(page->shared_info);
page->shared_info = NULL;
atomic_dec(&nr_shared_mfns);
}
if ( last_gfn &&
     test_and_clear_bit(_PGC_allocated, &page->count_info) )
put_page(page);
put_gfn(d, gfn);
- shr_unlock();
return 0;
}
mem_sharing_page_unlock(old_page);
put_gfn(d, gfn);
mem_sharing_notify_helper(d, gfn);
- shr_unlock();
return -ENOMEM;
}
paging_mark_dirty(d, mfn_x(page_to_mfn(page)));
/* We do not need to unlock a private page */
put_gfn(d, gfn);
- shr_unlock();
return 0;
}
break;
}
- shr_lock();
mem_sharing_audit();
- shr_unlock();
return rc;
}
{
printk("Initing memory sharing.\n");
#if MEM_SHARING_AUDIT
- mm_lock_init(&shr_lock);
+ spin_lock_init(&shr_audit_lock);
INIT_LIST_HEAD(&shr_audit_list);
#endif
}