... to cover xenheap and PGC_extra pages.
PGC_extra pages are intended to hold data structures that are associated
with a domain and may be mapped by that domain. They should not be treated
as 'normal' guest pages (i.e. RAM or page tables). Hence, in many cases
where code currently tests is_xen_heap_page() it should also check for
the PGC_extra bit in 'count_info'.
This patch therefore defines is_special_page() to cover both cases and
converts tests of is_xen_heap_page() (or open coded tests of PGC_xen_heap)
to is_special_page() where the page is assigned to a domain.
Signed-off-by: Paul Durrant <paul@xen.org>
Acked-by: Tamas K Lengyel <tamas@tklengyel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Julien Grall <julien@xen.org>
page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);
if ( unlikely(!page) ||
- unlikely(is_xen_heap_page(page)) )
+ unlikely(is_special_page(page)) )
{
if ( unlikely(p2m_is_broken(t)) )
type = XEN_DOMCTL_PFINFO_BROKEN;
unsigned long cacheattr = pte_flags_to_cacheattr(l1f);
int err;
- if ( is_xen_heap_page(page) )
+ if ( is_special_page(page) )
{
if ( write )
put_page_type(page);
{
page->count_info &= ~PGC_cacheattr_mask;
- BUG_ON(is_xen_heap_page(page));
+ BUG_ON(is_special_page(page));
rc = update_xen_mappings(mfn, 0);
}
rc = rc2;
}
- if ( likely(!is_xen_heap_page(page)) )
+ if ( likely(!is_special_page(page)) )
{
ASSERT((page->u.inuse.type_info &
(PGT_type_mask | PGT_count_mask)) == PGT_writable_page);
if ( !(owner = page_get_owner_and_reference(page)) )
goto fail;
- if ( owner != d || is_xen_heap_page(page) ||
- (page->count_info & PGC_extra) )
+ if ( owner != d || is_special_page(page) )
goto fail_put;
/*
prev_mfn = get_gfn(d, gfn_x(gpfn), &p2mt);
if ( mfn_valid(prev_mfn) )
{
- if ( is_xen_heap_mfn(prev_mfn) )
- /* Xen heap frames are simply unhooked from this phys slot. */
+ if ( is_special_page(mfn_to_page(prev_mfn)) )
+ /* Special pages are simply unhooked from this phys slot. */
rc = guest_physmap_remove_page(d, gpfn, prev_mfn, PAGE_ORDER_4K);
else
/* Normal domain memory is freed, to avoid leaking memory. */
* pageable() predicate for this, due to it having the same properties
* that we want.
*/
- if ( !p2m_is_pageable(p2mt) || is_xen_heap_page(pg) )
+ if ( !p2m_is_pageable(p2mt) || is_special_page(pg) )
{
rc = -EINVAL;
goto err;
if ( !p2m_is_sharable(p2mt) )
goto out;
- /* Skip xen heap pages */
page = mfn_to_page(mfn);
- if ( !page || is_xen_heap_page(page) )
+ if ( !page || is_special_page(page) )
goto out;
/* Check if there are mem_access/remapped altp2m entries for this page */
n = 1UL << min(cur_order, SUPERPAGE_ORDER + 0U);
for ( k = 0, page = mfn_to_page(mfn); k < n; ++k, ++page )
- if ( !(page->count_info & PGC_allocated) ||
- (page->count_info & (PGC_page_table | PGC_xen_heap)) ||
+ if ( is_special_page(page) ||
+ !(page->count_info & PGC_allocated) ||
+ (page->count_info & PGC_page_table) ||
(page->count_info & PGC_count_mask) > max_ref )
goto out;
}
pg = mfn_to_page(mfns[i]);
/*
- * If this is ram, and not a pagetable or from the xen heap, and
+ * If this is ram, and not a pagetable or a special page, and
* probably not mapped elsewhere, map it; otherwise, skip.
*/
- if ( p2m_is_ram(types[i]) && (pg->count_info & PGC_allocated) &&
- !(pg->count_info & (PGC_page_table | PGC_xen_heap)) &&
+ if ( !is_special_page(pg) && p2m_is_ram(types[i]) &&
+ (pg->count_info & PGC_allocated) &&
+ !(pg->count_info & PGC_page_table) &&
((pg->count_info & PGC_count_mask) <= max_ref) )
map[i] = map_domain_page(mfns[i]);
else
prev_mfn = get_gfn(tdom, gpfn, &p2mt_prev);
if ( mfn_valid(prev_mfn) )
{
- if ( is_xen_heap_mfn(prev_mfn) )
- /* Xen heap frames are simply unhooked from this phys slot */
+ if ( is_special_page(mfn_to_page(prev_mfn)) )
+ /* Special pages are simply unhooked from this phys slot */
rc = guest_physmap_remove_page(tdom, _gfn(gpfn), prev_mfn, 0);
else
/* Normal domain memory is freed, to avoid leaking memory. */
* The qemu helper process has an untyped mapping of this dom's RAM
* and the HVM restore program takes another.
* Also allow one typed refcount for
- * - Xen heap pages, to match share_xen_page_with_guest(),
- * - ioreq server pages, to match prepare_ring_for_helper().
+ * - special pages, which are explicitly referenced and mapped by
+ * Xen.
+ * - ioreq server pages, which may be special pages or normal
+ * guest pages with an extra reference taken by
+ * prepare_ring_for_helper().
*/
if ( !(shadow_mode_external(d)
&& (page->count_info & PGC_count_mask) <= 3
&& ((page->u.inuse.type_info & PGT_count_mask)
- == (is_xen_heap_page(page) ||
+ == (is_special_page(page) ||
(is_hvm_domain(d) && is_ioreq_server_page(d, page))))) )
printk(XENLOG_G_ERR "can't find all mappings of mfn %"PRI_mfn
- " (gfn %"PRI_gfn"): c=%lx t=%lx x=%d i=%d\n",
+ " (gfn %"PRI_gfn"): c=%lx t=%lx s=%d i=%d\n",
mfn_x(gmfn), gfn_x(gfn),
page->count_info, page->u.inuse.type_info,
- !!is_xen_heap_page(page),
+ is_special_page(page),
(is_hvm_domain(d) && is_ioreq_server_page(d, page)));
}
* caching attributes in the shadows to match what was asked for.
*/
if ( (level == 1) && is_hvm_domain(d) &&
- !is_xen_heap_mfn(target_mfn) )
+ (!mfn_valid(target_mfn) ||
+ !is_special_page(mfn_to_page(target_mfn))) )
{
int type;
if ( !mfn_valid(_mfn(mfn)) )
continue;
- if ( is_page_in_use(page) && !is_xen_heap_page(page) )
+ if ( is_page_in_use(page) && !is_special_page(page) )
{
if ( page->count_info & PGC_page_table )
{
+ 3 * PAGE_SIZE)) )
continue; /* skip tboot and its page tables */
- if ( is_page_in_use(page) && is_xen_heap_page(page) )
+ if ( is_page_in_use(page) && is_special_page(page) )
{
void *pg;
#include <asm/mm.h>
+/*
+ * True for pages that must not be treated as 'normal' guest pages
+ * (i.e. RAM or page tables): Xen heap pages, and PGC_extra pages,
+ * which hold data structures associated with a domain and possibly
+ * mapped by that domain.
+ */
+static inline bool is_special_page(const struct page_info *page)
+{
+    return is_xen_heap_page(page) || (page->count_info & PGC_extra);
+}
+
#ifndef page_list_entry
struct page_list_head
{