int p2m_alloc_table(struct p2m_domain *p2m);
/* Return all the p2m resources to Xen. */
-void p2m_teardown(struct p2m_domain *p2m);
+void p2m_teardown(struct p2m_domain *p2m, bool remove_root);
void p2m_final_teardown(struct domain *d);
/* Add/remove a page to/from a domain's p2m table. */
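The new remove_root parameter selects whether p2m_teardown() also unhooks and frees the p2m root page table. Every call site updated in the hunks below passes true, so this change by itself keeps the existing behaviour; a caller passing false (none is added here) would have the rest of the p2m pool freed while the root page is scrubbed and kept allocated, as the p2m.c hunk further down shows.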
        }

        for ( i = 0; i < MAX_ALTP2M; i++ )
-            p2m_teardown(d->arch.altp2m_p2m[i]);
+            p2m_teardown(d->arch.altp2m_p2m[i], true);
    }

    /* Destroy nestedp2m's first */
    for (i = 0; i < MAX_NESTEDP2M; i++) {
-        p2m_teardown(d->arch.nested_p2m[i]);
+        p2m_teardown(d->arch.nested_p2m[i], true);
    }

    if ( d->arch.paging.hap.total_pages != 0 )
        hap_teardown(d, NULL);

-    p2m_teardown(p2m_get_hostp2m(d));
+    p2m_teardown(p2m_get_hostp2m(d), true);
    /* Free any memory that the p2m teardown released */
    paging_lock(d);
    hap_set_allocation(d, 0, NULL);
 * hvm fixme: when adding support for pvh non-hardware domains, this path must
 * cleanup any foreign p2m types (release refcnts on them).
 */
-void p2m_teardown(struct p2m_domain *p2m)
+void p2m_teardown(struct p2m_domain *p2m, bool remove_root)
{
#ifdef CONFIG_HVM
-    struct page_info *pg;
+    struct page_info *pg, *root_pg = NULL;
    struct domain *d;

    if ( !p2m )
    ASSERT(atomic_read(&d->shr_pages) == 0);
#endif

-    p2m->phys_table = pagetable_null();
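+    /*
+     * Callers may ask for the root page table to be preserved: in that case
+     * scrub it (the page tables it references are freed just below) and
+     * remember it so the loop below does not return it to the paging pool.
+     */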
+    if ( remove_root )
+        p2m->phys_table = pagetable_null();
+    else if ( !pagetable_is_null(p2m->phys_table) )
+    {
+        root_pg = pagetable_get_page(p2m->phys_table);
+        clear_domain_page(pagetable_get_mfn(p2m->phys_table));
+    }

    while ( (pg = page_list_remove_head(&p2m->pages)) )
-        d->arch.paging.free_page(d, pg);
+        if ( pg != root_pg )
+            d->arch.paging.free_page(d, pg);
+
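+    /* Keep the preserved root queued so a later teardown still frees it. */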
+    if ( root_pg )
+        page_list_add(root_pg, &p2m->pages);

    p2m_unlock(p2m);
#endif
 out_unlocked:
#ifdef CONFIG_HVM
    if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) )
-        p2m_teardown(p2m);
+        p2m_teardown(p2m, true);
#endif
    if ( rv != 0 && pg != NULL )
    {
        shadow_teardown(d, NULL);

    /* It is now safe to pull down the p2m map. */
-    p2m_teardown(p2m_get_hostp2m(d));
+    p2m_teardown(p2m_get_hostp2m(d), true);
    /* Free any shadow memory that the p2m teardown released */
    paging_lock(d);
    shadow_set_allocation(d, 0, NULL);
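None of the hunks above introduce a caller that passes false, so the flag is presumably exercised by a separate change on a teardown path where the root page table has to stay allocated a while longer. A minimal, purely illustrative sketch of such a call site (not part of this patch):

    /*
     * Hypothetical caller: free the bulk of the p2m allocation early, but
     * keep the (scrubbed) root page table in place because it may still be
     * referenced.
     */
    p2m_teardown(p2m_get_hostp2m(d), false);

    /* ... later, once nothing can reference the root any more ... */
    p2m_teardown(p2m_get_hostp2m(d), true);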