Conditionalize it and its uses accordingly.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tamas K Lengyel <tamas@tklengyel.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
}
else
{
+#ifdef CONFIG_MEM_PAGING
/*
* There is a chance we're plugging a hole where a paged out
* page was.
put_page(cpage);
}
}
+#endif
}
atomic_inc(&nr_saved_mfns);
        /* Count how many PoD entries we'll be replacing if successful */
pod_count++;
}
+#ifdef CONFIG_MEM_PAGING
else if ( p2m_is_paging(ot) && (ot != p2m_ram_paging_out) )
{
/* We're plugging a hole in the physmap where a paged out page was */
atomic_dec(&d->paged_pages);
}
+#endif
}
/* Then, look for m->p mappings for this range and deal with them */
#ifdef CONFIG_MEM_SHARING
info->shr_pages = atomic_read(&d->shr_pages);
#endif
+#ifdef CONFIG_MEM_PAGING
info->paged_pages = atomic_read(&d->paged_pages);
+#endif
info->shared_info_frame =
gfn_x(mfn_to_gfn(d, _mfn(virt_to_mfn(d->shared_info))));
BUG_ON(SHARED_M2P(info->shared_info_frame));
#ifdef CONFIG_MEM_SHARING
" shared_pages=%u"
#endif
+#ifdef CONFIG_MEM_PAGING
" paged_pages=%u"
+#endif
" dirty_cpus={%*pbl} max_pages=%u\n",
domain_tot_pages(d), d->xenheap_pages,
#ifdef CONFIG_MEM_SHARING
atomic_read(&d->shr_pages),
#endif
- atomic_read(&d->paged_pages), CPUMASK_PR(d->dirty_cpumask),
- d->max_pages);
+#ifdef CONFIG_MEM_PAGING
+ atomic_read(&d->paged_pages),
+#endif
+ CPUMASK_PR(d->dirty_cpumask), d->max_pages);
printk(" handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
"%02x%02x-%02x%02x%02x%02x%02x%02x vm_assist=%08lx\n",
d->handle[ 0], d->handle[ 1], d->handle[ 2], d->handle[ 3],
atomic_t shr_pages; /* shared pages */
#endif
+#ifdef CONFIG_MEM_PAGING
atomic_t paged_pages; /* paged-out pages */
+#endif
/* Scheduling. */
void *sched_priv; /* scheduler-specific data */