         ASSERT(!shadow_mode_refcounts(owner));
         gmfn = mfn_to_gmfn(owner, mfn_x(page_to_mfn(page)));
-        ASSERT(VALID_M2P(gmfn));
-        /* Page sharing not supported for shadowed domains */
-        if(!SHARED_M2P(gmfn))
+        if ( VALID_M2P(gmfn) )
             shadow_remove_all_shadows(owner, _mfn(gmfn));
     }
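The single VALID_M2P() guard is sufficient here because of how the M2P sentinels are encoded. Paraphrasing the definitions from Xen's x86 headers (shown for reference; the tree this patch applies to is authoritative):

    #define INVALID_M2P_ENTRY   (~0UL)
    #define VALID_M2P(_e)       (!((_e) & (1UL << (BITS_PER_LONG - 1))))
    #define SHARED_M2P_ENTRY    (~0UL - 1UL)
    #define SHARED_M2P(_e)      ((_e) == SHARED_M2P_ENTRY)

Both sentinels have the top bit set, so !VALID_M2P() rejects SHARED_M2P_ENTRY as well as INVALID_M2P_ENTRY. The new guard therefore still keeps shared pages away from shadow_remove_all_shadows() (sharing is not supported for shadowed domains), while also covering the invalid case. Note the old code was self-contradictory: under ASSERT(VALID_M2P(gmfn)), the !SHARED_M2P(gmfn) test could never be false.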
     /* Unmap from old location, if any. */
     old_gpfn = get_gpfn_from_mfn(mfn_x(mfn));
-    ASSERT( old_gpfn != SHARED_M2P_ENTRY );
+    ASSERT(!SHARED_M2P(old_gpfn));
     if ( space == XENMAPSPACE_gmfn && old_gpfn != gfn )
     {
         rc = -EXDEV;
             unsigned long t = read_atomic(&page->u.inuse.type_info);
             ASSERT((t & PGT_type_mask) == PGT_shared_page);
             ASSERT((t & PGT_count_mask) >= 2);
-            ASSERT(get_gpfn_from_mfn(mfn) == SHARED_M2P_ENTRY);
+            ASSERT(SHARED_M2P(get_gpfn_from_mfn(mfn)));
             return page;
         }
     }
 }
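For background on the rewritten assertion: a shared MFN backs many GFNs at once, so its M2P slot cannot name a single owner GFN and instead holds the SHARED_M2P_ENTRY sentinel. A hypothetical helper expressing the same predicate (name invented for illustration, not part of the patch):

    /* True if the M2P marks this MFN as a shared page. */
    static inline bool mfn_is_shared(unsigned long mfn)
    {
        return SHARED_M2P(get_gpfn_from_mfn(mfn));
    }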
     /* Check the m2p entry */
-    if ( get_gpfn_from_mfn(mfn_x(mfn)) != SHARED_M2P_ENTRY )
+    if ( !SHARED_M2P(get_gpfn_from_mfn(mfn_x(mfn))) )
     {
         MEM_SHARING_DEBUG("mfn %lx shared, but wrong m2p entry (%lx)!\n",
                           mfn_x(mfn), get_gpfn_from_mfn(mfn_x(mfn)));
     {
         m2pfn = get_gpfn_from_mfn(mfn+i1);
         /* Allow shared M2Ps */
-        if ( (m2pfn != (gfn + i1)) &&
-             (m2pfn != SHARED_M2P_ENTRY) )
+        if ( (m2pfn != (gfn + i1)) && !SHARED_M2P(m2pfn) )
         {
             pmbad++;
             P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
             continue;
         }
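The "Allow shared M2Ps" case exists because the gfn -> mfn -> gfn round trip is not the identity for shared pages: many gfns can map to one mfn, whose M2P slot then carries the sentinel rather than any one gfn. The consistency rule the audit enforces, as a sketch (hypothetical helper, not in the patch):

    /* A p2m entry is consistent if its m2p either points back at the
     * gfn or is the shared sentinel (many gfns -> one shared mfn). */
    static bool m2p_consistent(unsigned long gfn, unsigned long mfn)
    {
        unsigned long m2pfn = get_gpfn_from_mfn(mfn);

        return m2pfn == gfn || SHARED_M2P(m2pfn);
    }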
-    if ( gfn == SHARED_M2P_ENTRY )
+    if ( SHARED_M2P(gfn) )
     {
         P2M_PRINTK("shared mfn (%lx) on domain page list!\n",
                    mfn);
     /* We /really/ mean PFN here, even for non-translated guests. */
     pfn = _pfn(get_gpfn_from_mfn(mfn_x(gmfn)));
-    /* Shared pages are always read-only; invalid pages can't be dirty. */
-    if ( unlikely(SHARED_M2P(pfn_x(pfn)) || !VALID_M2P(pfn_x(pfn))) )
+    /* Invalid pages can't be dirty. */
+    if ( unlikely(!VALID_M2P(pfn_x(pfn))) )
         return 0;
     mfn = d->arch.paging.log_dirty.top;
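Dropping the explicit SHARED_M2P() test here is safe for the same encoding reason as above: the shared sentinel already fails VALID_M2P(), and a shared page is read-only, so it can never be dirty anyway. If that dependency between the two macros ever needed documenting in code, a compile-time check along these lines would do (a sketch only, not part of the patch):

    /* The log-dirty path relies on both M2P sentinels testing as
     * invalid: each has the top bit set. */
    BUILD_BUG_ON(VALID_M2P(SHARED_M2P_ENTRY));
    BUILD_BUG_ON(VALID_M2P(INVALID_M2P_ENTRY));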