int p2m_alloc_table(struct p2m_domain *p2m);
/* Return all the p2m resources to Xen. */
-void p2m_teardown(struct p2m_domain *p2m, bool remove_root);
+void p2m_teardown(struct p2m_domain *p2m, bool remove_root, bool *preempted);
void p2m_final_teardown(struct domain *d);
/* Add/remove a page to/from a domain's p2m table. */
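
The new third parameter turns p2m_teardown() into a restartable operation: a caller on a preemptible path passes a pointer to a bool that gets set when the teardown bails out early, while a caller that cannot be preempted passes NULL to keep the old behaviour. A minimal sketch of the intended calling convention (the relinquish_p2m() wrapper below is hypothetical, for illustration only, not part of this patch):

    /* Hypothetical caller, for illustration only. */
    static int relinquish_p2m(struct domain *d)
    {
        bool preempted = false;

        /* Free leaf pagetables but keep the root; may bail out early. */
        p2m_teardown(p2m_get_hostp2m(d), false, &preempted);
        if ( preempted )
            return -ERESTART; /* caller re-issues the hypercall later */

        return 0;
    }
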
if ( hvm_altp2m_supported() )
for ( i = 0; i < MAX_ALTP2M; i++ )
- p2m_teardown(d->arch.altp2m_p2m[i], true);
+ p2m_teardown(d->arch.altp2m_p2m[i], true, NULL);
/* Destroy nestedp2m's first */
for (i = 0; i < MAX_NESTEDP2M; i++) {
- p2m_teardown(d->arch.nested_p2m[i], true);
+ p2m_teardown(d->arch.nested_p2m[i], true, NULL);
}
if ( d->arch.paging.hap.total_pages != 0 )
hap_teardown(d, NULL);
- p2m_teardown(p2m_get_hostp2m(d), true);
+ p2m_teardown(p2m_get_hostp2m(d), true, NULL);
/* Free any memory that the p2m teardown released */
paging_lock(d);
hap_set_allocation(d, 0, NULL);
FREE_XENHEAP_PAGE(d->arch.altp2m_visible_eptp);
for ( i = 0; i < MAX_ALTP2M; i++ )
- p2m_teardown(d->arch.altp2m_p2m[i], false);
+ {
+ p2m_teardown(d->arch.altp2m_p2m[i], false, preempted);
+ if ( preempted && *preempted )
+ return;
+ }
}
/* Destroy nestedp2m's after altp2m. */
for ( i = 0; i < MAX_NESTEDP2M; i++ )
- p2m_teardown(d->arch.nested_p2m[i], false);
+ {
+ p2m_teardown(d->arch.nested_p2m[i], false, preempted);
+ if ( preempted && *preempted )
+ return;
+ }
- p2m_teardown(p2m_get_hostp2m(d), false);
+ p2m_teardown(p2m_get_hostp2m(d), false, preempted);
+ if ( preempted && *preempted )
+ return;
paging_lock(d); /* Keep various asserts happy */
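
For context, hap_teardown() (and its shadow counterpart) already takes a preempted pointer of its own, and the paging-layer caller converts the flag into -ERESTART so the domain-destruction hypercall is continued rather than left to block. Roughly, as a sketch assuming the usual paging_teardown() shape rather than the verbatim upstream code:

    int paging_teardown(struct domain *d)
    {
        bool preempted = false;

        if ( hap_enabled(d) )
            hap_teardown(d, &preempted);
        else
            shadow_teardown(d, &preempted);

        /* Tell the caller to restart the teardown hypercall. */
        if ( preempted )
            return -ERESTART;

        return 0;
    }
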
* along with this program; If not, see <http://www.gnu.org/licenses/>.
*/
+#include <xen/event.h>
 #include <xen/types.h>
#include <asm/p2m.h>
#include "mm-locks.h"
* hvm fixme: when adding support for pvh non-hardware domains, this path must
 * clean up any foreign p2m types (release refcnts on them).
*/
-void p2m_teardown(struct p2m_domain *p2m, bool remove_root)
+void p2m_teardown(struct p2m_domain *p2m, bool remove_root, bool *preempted)
{
#ifdef CONFIG_HVM
struct page_info *pg, *root_pg = NULL;
struct domain *d;
+ unsigned int i = 0;
if ( !p2m )
return;
}
while ( (pg = page_list_remove_head(&p2m->pages)) )
- if ( pg != root_pg )
- d->arch.paging.free_page(d, pg);
+ {
+ if ( pg == root_pg )
+ continue;
+
+ d->arch.paging.free_page(d, pg);
+
+ /* Arbitrarily check for preemption every 1024 iterations. */
+ if ( preempted && !(++i % 1024) && general_preempt_check() )
+ {
+ *preempted = true;
+ break;
+ }
+ }
if ( root_pg )
page_list_add(root_pg, &p2m->pages);
paging_unlock(d);
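
The batching in the loop above is the standard Xen idiom for long-running teardown work: general_preempt_check() (the reason for the new xen/event.h include earlier) polls for pending softirqs and event delivery, and doing so only once every 1024 frees keeps the common-case cost negligible. The same pattern in isolation, with more_work()/do_unit() as placeholder names:

    /*
     * Poll for preemption once per 1024 work items; 1024 is arbitrary,
     * but a power of two keeps the modulo a cheap mask.
     */
    unsigned int i = 0;

    while ( more_work() )
    {
        do_unit();

        if ( !(++i % 1024) && general_preempt_check() )
            return -ERESTART; /* resume from the remaining work later */
    }
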
out_unlocked:
#ifdef CONFIG_HVM
+ /*
+ * It is fine to ignore preemption here: at most the root page table
+ * will have been allocated by p2m_alloc_table().
+ */
if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) )
- p2m_teardown(p2m, true);
+ p2m_teardown(p2m, true, NULL);
#endif
if ( rv != 0 && pg != NULL )
{
for_each_vcpu ( d, v )
shadow_vcpu_teardown(v);
- p2m_teardown(p2m_get_hostp2m(d), false);
+ p2m_teardown(p2m_get_hostp2m(d), false, preempted);
+ if ( preempted && *preempted )
+ return;
paging_lock(d);
shadow_teardown(d, NULL);
/* It is now safe to pull down the p2m map. */
- p2m_teardown(p2m_get_hostp2m(d), true);
+ p2m_teardown(p2m_get_hostp2m(d), true, NULL);
/* Free any shadow memory that the p2m teardown released */
paging_lock(d);
shadow_set_allocation(d, 0, NULL);