From be2d8830237b12b00aad81d7aa10745055233db8 Mon Sep 17 00:00:00 2001
From: Stefano Stabellini
Date: Tue, 5 Feb 2013 11:31:08 +0000
Subject: [PATCH] xen/arm: implement domain_relinquish_resources

put_page on every entry in xenpage_list and page_list

Signed-off-by: Stefano Stabellini
Acked-by: Ian Campbell
Acked-by: Tim Deegan
Committed-by: Ian Campbell
---
 xen/arch/arm/domain.c | 47 ++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 44 insertions(+), 3 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 48dce80ed8..0836c1692e 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -8,6 +8,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -514,11 +515,51 @@ void arch_vcpu_reset(struct vcpu *v)
     vcpu_end_shutdown_deferral(v);
 }
 
+static int relinquish_memory(struct domain *d, struct page_list_head *list)
+{
+    struct page_info *page, *tmp;
+    int ret = 0;
+
+    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
+    spin_lock_recursive(&d->page_alloc_lock);
+
+    page_list_for_each_safe( page, tmp, list )
+    {
+        /* Grab a reference to the page so it won't disappear from under us. */
+        if ( unlikely(!get_page(page, d)) )
+            /* Couldn't get a reference -- someone is freeing this page. */
+            BUG();
+
+        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+            put_page(page);
+
+        put_page(page);
+
+        if ( hypercall_preempt_check() )
+        {
+            ret = -EAGAIN;
+            goto out;
+        }
+    }
+
+  out:
+    spin_unlock_recursive(&d->page_alloc_lock);
+    return ret;
+}
+
 int domain_relinquish_resources(struct domain *d)
 {
-    /* XXX teardown pagetables, free pages etc */
-    ASSERT(0);
-    return 0;
+    int ret = 0;
+
+    ret = relinquish_memory(d, &d->xenpage_list);
+    if ( ret )
+        return ret;
+
+    ret = relinquish_memory(d, &d->page_list);
+    if ( ret )
+        return ret;
+
+    return ret;
 }
 
 void arch_dump_domain_info(struct domain *d)
-- 
2.30.2
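
The teardown added by this patch is preemptible: relinquish_memory() makes domain_relinquish_resources() return -EAGAIN whenever hypercall_preempt_check() fires, and the caller is expected to invoke it again until it finally returns 0. The standalone C sketch below only illustrates that retry contract; it is not Xen code, and the names toy_domain, toy_relinquish_resources and BATCH are invented for the example.

    #include <stdio.h>
    #include <errno.h>

    /* Toy stand-in for the real domain structure -- illustration only. */
    struct toy_domain {
        int pages_left;           /* pages still on the domain's page lists */
    };

    /* Pretend "preemption needed" after every batch of this many pages. */
    #define BATCH 4

    /* Sketch of a preemptible teardown step: releases up to BATCH pages and
     * returns -EAGAIN while work remains, 0 once the lists are empty. */
    static int toy_relinquish_resources(struct toy_domain *d)
    {
        int done = 0;

        while (d->pages_left > 0) {
            d->pages_left--;              /* stands in for put_page() on one entry */
            if (++done == BATCH && d->pages_left > 0)
                return -EAGAIN;           /* ask the caller to come back later */
        }
        return 0;
    }

    int main(void)
    {
        struct toy_domain d = { .pages_left = 10 };
        int rc;

        /* The caller keeps retrying until teardown completes, mirroring how
         * the destroy path is expected to re-enter domain_relinquish_resources()
         * after an -EAGAIN return. */
        while ((rc = toy_relinquish_resources(&d)) == -EAGAIN)
            printf("preempted, %d pages left\n", d.pages_left);

        printf("teardown complete, rc=%d\n", rc);
        return 0;
    }

In the patch itself the batch boundary is simply wherever hypercall_preempt_check() becomes true, and the retry is typically re-driven by the hypervisor's hypercall continuation handling rather than by a plain loop as in this sketch.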