 {
 #ifdef CONFIG_HVM
     struct page_info *pg;
-#endif
     struct domain *d;
 
     if ( !p2m )
         return;
 
     d = p2m->domain;
 
     p2m_lock(p2m);
+#ifdef CONFIG_MEM_SHARING
     ASSERT(atomic_read(&d->shr_pages) == 0);
+#endif
-#ifdef CONFIG_HVM
     p2m->phys_table = pagetable_null();
     while ( (pg = page_list_remove_head(&p2m->pages)) )
         d->arch.paging.free_page(d, pg);
-#endif
     p2m_unlock(p2m);
+#endif
 }
 
 void p2m_final_teardown(struct domain *d)
     info->tot_pages = domain_tot_pages(d);
     info->max_pages = d->max_pages;
     info->outstanding_pages = d->outstanding_pages;
+#ifdef CONFIG_MEM_SHARING
     info->shr_pages = atomic_read(&d->shr_pages);
+#endif
     info->paged_pages = atomic_read(&d->paged_pages);
     info->shared_info_frame =
         gfn_x(mfn_to_gfn(d, _mfn(virt_to_mfn(d->shared_info))));
         printk(" refcnt=%d dying=%d pause_count=%d\n",
                atomic_read(&d->refcnt), d->is_dying,
                atomic_read(&d->pause_count));
-        printk(" nr_pages=%d xenheap_pages=%d shared_pages=%u paged_pages=%u "
-               "dirty_cpus={%*pbl} max_pages=%u\n",
-               domain_tot_pages(d), d->xenheap_pages, atomic_read(&d->shr_pages),
+        printk(" nr_pages=%u xenheap_pages=%u"
+#ifdef CONFIG_MEM_SHARING
+               " shared_pages=%u"
+#endif
+               " paged_pages=%u"
+               " dirty_cpus={%*pbl} max_pages=%u\n",
+               domain_tot_pages(d), d->xenheap_pages,
+#ifdef CONFIG_MEM_SHARING
+               atomic_read(&d->shr_pages),
+#endif
                atomic_read(&d->paged_pages), CPUMASK_PR(d->dirty_cpumask),
                d->max_pages);
         printk(" handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
     unsigned int outstanding_pages; /* pages claimed but not possessed */
     unsigned int max_pages;         /* maximum value for domain_tot_pages() */
     unsigned int extra_pages;       /* pages not included in domain_tot_pages() */
+
+#ifdef CONFIG_MEM_SHARING
     atomic_t shr_pages;             /* shared pages */
+#endif
+
     atomic_t paged_pages;           /* paged-out pages */
 
     /* Scheduling. */