Replace one open-coded mfn_eq() and fix some coding style issues on the altered lines.
Switch __mfn_valid() to returning bool, although it can't be updated to take
mfn_t because of include dependencies.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tim Deegan <tim@xen.org>
Acked-by: Julien Grall <julien.grall@arm.com>
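
For reviewers unfamiliar with the typesafe wrappers, the sketch below is an
abbreviated illustration of the pattern the call-site changes rely on. It is
not the real implementation: the actual definitions come from Xen's TYPE_SAFE()
machinery in xen/include/xen/mm.h, and the struct form shown here corresponds
to the debug-build representation.

    #include <stdbool.h>

    /*
     * Minimal sketch of typesafe MFN handling, assumed for illustration only.
     * mfn_t wraps a raw unsigned long so the compiler catches accidental
     * mixing of MFNs with GFNs or plain integers.
     */
    typedef struct { unsigned long mfn; } mfn_t;

    static inline mfn_t _mfn(unsigned long m)    { return (mfn_t){ m }; }
    static inline unsigned long mfn_x(mfn_t m)   { return m.mfn; }
    static inline bool mfn_eq(mfn_t a, mfn_t b)  { return mfn_x(a) == mfn_x(b); }

    /*
     * With mfn_valid() now taking mfn_t, call sites already holding an mfn_t
     * drop their mfn_x() and call mfn_valid(mfn) directly, while call sites
     * holding a raw unsigned long wrap it as mfn_valid(_mfn(mfn)).
     */

The bool return from __mfn_valid() matches its use as a predicate throughout,
while __mfn_valid() itself keeps taking unsigned long because, as noted above,
include dependencies prevent it from seeing mfn_t.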
if ( mfn_eq(mfn, INVALID_MFN) )
goto err;
- if ( !mfn_valid(mfn_x(mfn)) )
+ if ( !mfn_valid(mfn) )
goto err;
/*
bool is_iomem_page(mfn_t mfn)
{
- return !mfn_valid(mfn_x(mfn));
+ return !mfn_valid(mfn);
}
void clear_and_clean_page(struct page_info *page)
{
unsigned long mfn = pte.p2m.base;
- ASSERT(mfn_valid(mfn));
+ ASSERT(mfn_valid(_mfn(mfn)));
put_page(mfn_to_page(mfn));
}
}
p2m_flush_tlb_sync(p2m);
mfn = _mfn(entry.p2m.base);
- ASSERT(mfn_valid(mfn_x(mfn)));
+ ASSERT(mfn_valid(mfn));
free_domheap_page(mfn_to_page(mfn_x(mfn)));
}
if ( rc )
goto err;
- if ( !mfn_valid(maddr >> PAGE_SHIFT) )
+ if ( !mfn_valid(_mfn(maddr >> PAGE_SHIFT)) )
goto err;
page = mfn_to_page(maddr >> PAGE_SHIFT);
if ( mi->module[i].kind == BOOTMOD_XEN )
continue;
- if ( !mfn_valid(paddr_to_pfn(s)) || !mfn_valid(paddr_to_pfn(e)))
+ if ( !mfn_valid(_mfn(paddr_to_pfn(s))) ||
+ !mfn_valid(_mfn(paddr_to_pfn(e))) )
continue;
dt_unreserved_regions(s, e, init_domheap_pages, 0);
(mib->mc_status & MCi_STATUS_ADDRV) &&
(mc_check_addr(mib->mc_status, mib->mc_misc, MC_ADDR_PHYSICAL)) &&
(who == MCA_POLLER || who == MCA_CMCI_HANDLER) &&
- (mfn_valid(paddr_to_pfn(mib->mc_addr))))
+ (mfn_valid(_mfn(paddr_to_pfn(mib->mc_addr)))))
{
struct domain *d;
if ( is_hardware_domain(d) )
return 0;
- if (!mfn_valid(mfn_x(mfn)))
+ if ( !mfn_valid(mfn) )
return -EINVAL;
if ( !has_hvm_container_domain(d) || !paging_mode_hap(d) )
if ( xenpmu_data )
{
mfn = domain_page_map_to_mfn(xenpmu_data);
- ASSERT(mfn_valid(mfn));
+ ASSERT(mfn_valid(_mfn(mfn)));
unmap_domain_page_global(xenpmu_data);
put_page_and_type(mfn_to_page(mfn));
}
DBGP2("l1t:%p l1to:%lx l1e:%lx mfn:%#"PRI_mfn"\n", l1t, l1_table_offset(vaddr),
l1e, mfn_x(mfn));
- return mfn_valid(mfn_x(mfn)) ? mfn : INVALID_MFN;
+ return mfn_valid(mfn) ? mfn : INVALID_MFN;
}
/* Returns: number of bytes remaining to be copied */
unsigned long pfn = domctl->u.set_broken_page_p2m.pfn;
mfn_t mfn = get_gfn_query(d, pfn, &pt);
- if ( unlikely(!mfn_valid(mfn_x(mfn))) || unlikely(!p2m_is_ram(pt)) )
+ if ( unlikely(!mfn_valid(mfn)) || unlikely(!p2m_is_ram(pt)) )
ret = -EINVAL;
else
ret = p2m_change_type_one(d, pfn, pt, p2m_ram_broken);
return MTRR_TYPE_WRBACK;
}
- if ( !mfn_valid(mfn_x(mfn)) )
+ if ( !mfn_valid(mfn) )
{
*ipat = 1;
return MTRR_TYPE_UNCACHABLE;
/* Mark as I/O up to next RAM region. */
for ( ; pfn < rstart_pfn; pfn++ )
{
- if ( !mfn_valid(pfn) )
+ if ( !mfn_valid(_mfn(pfn)) )
continue;
share_xen_page_with_guest(
mfn_to_page(pfn), dom_io, XENSHARE_writable);
{
struct page_info *page = mfn_to_page(page_nr);
- if ( unlikely(!mfn_valid(page_nr)) || unlikely(!get_page(page, d)) )
+ if ( unlikely(!mfn_valid(_mfn(page_nr))) || unlikely(!get_page(page, d)) )
{
MEM_LOG("Could not get page ref for pfn %lx", page_nr);
return 0;
{
struct page_info *page;
- if ( !mfn_valid(mfn_x(mfn)) )
+ if ( !mfn_valid(mfn) )
return true;
/* Caller must know that it is an iomem page, or a reference is held. */
return -EINVAL;
}
- if ( !mfn_valid(mfn) ||
+ if ( !mfn_valid(_mfn(mfn)) ||
(real_pg_owner = page_get_owner_and_reference(page)) == dom_io )
{
int flip = 0;
/* Only needed the reference to confirm dom_io ownership. */
- if ( mfn_valid(mfn) )
+ if ( mfn_valid(_mfn(mfn)) )
put_page(page);
/* DOMID_IO reverts to caller for privilege checks. */
ASSERT(opt_allow_superpage);
- if ( !mfn_valid(mfn | (L1_PAGETABLE_ENTRIES - 1)) )
+ if ( !mfn_valid(_mfn(mfn | (L1_PAGETABLE_ENTRIES - 1))) )
return -EINVAL;
spage = mfn_to_spage(mfn);
MEM_LOG("Unaligned superpage reference mfn %lx", mfn);
rc = -EINVAL;
}
- else if ( !mfn_valid(mfn | (L1_PAGETABLE_ENTRIES - 1)) )
+ else if ( !mfn_valid(_mfn(mfn | (L1_PAGETABLE_ENTRIES - 1))) )
rc = -EINVAL;
else if ( op.cmd == MMUEXT_MARK_SUPER )
rc = mark_superpage(mfn_to_spage(mfn), d);
/* Remove previously mapped page if it was present. */
prev_mfn = mfn_x(get_gfn(d, gfn_x(gpfn), &p2mt));
- if ( mfn_valid(prev_mfn) )
+ if ( mfn_valid(_mfn(prev_mfn)) )
{
if ( is_xen_heap_mfn(prev_mfn) )
/* Xen heap frames are simply unhooked from this phys slot. */
/* We are looking only for read-only mappings of p.t. pages. */
ASSERT((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) == _PAGE_PRESENT);
- ASSERT(mfn_valid(mfn));
+ ASSERT(mfn_valid(_mfn(mfn)));
ASSERT((page->u.inuse.type_info & PGT_type_mask) == PGT_l1_page_table);
ASSERT((page->u.inuse.type_info & PGT_count_mask) != 0);
ASSERT(page_get_owner(page) == d);
return 0;
mfn = l1e_get_pfn(pte);
- if ( mfn_valid(mfn) )
+ if ( mfn_valid(_mfn(mfn)) )
{
struct page_info *page = mfn_to_page(mfn);
struct domain *owner = page_get_owner_and_reference(page);
top_mfn = _mfn(page_to_mfn(top_page));
/* Map the top-level table and call the tree-walker */
- ASSERT(mfn_valid(mfn_x(top_mfn)));
+ ASSERT(mfn_valid(top_mfn));
top_map = map_domain_page(top_mfn);
#if GUEST_PAGING_LEVELS == 3
top_map += (cr3 & ~(PAGE_MASK | 31));
/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef mfn_valid
-#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
#undef page_to_mfn
#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
/* NESTED VIRT P2M FUNCTIONS */
/********************************************/
/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_valid
-#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
#undef page_to_mfn
#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
mfn = ap2m->get_entry(ap2m, gfn_l, &t, &old_a, 0, NULL, NULL);
/* Check host p2m if no valid entry in alternate */
- if ( !mfn_valid(mfn_x(mfn)) )
+ if ( !mfn_valid(mfn) )
{
mfn = __get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
P2M_ALLOC | P2M_UNSHARE, &page_order, 0);
rc = -ESRCH;
- if ( !mfn_valid(mfn_x(mfn)) || t != p2m_ram_rw )
+ if ( !mfn_valid(mfn) || t != p2m_ram_rw )
return rc;
/* If this is a superpage, copy that first */
#undef mfn_to_page
#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef mfn_valid
-#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
#undef page_to_mfn
#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
continue;
amfn = get_gfn_type_access(ap2m, gfn_x(gfn), &ap2mt, &ap2ma, 0, NULL);
- if ( mfn_valid(amfn) && (mfn_x(amfn) != mfn_x(mfn) || ap2ma != p2ma) )
+ if ( mfn_valid(amfn) && (!mfn_eq(amfn, mfn) || ap2ma != p2ma) )
{
altp2m_list_unlock(d);
goto out;
{
struct domain *fdom;
- if ( !mfn_valid(new.mfn) )
+ if ( !mfn_valid(_mfn(new.mfn)) )
goto out;
rc = -ESRCH;
ept_entry = table + (gfn_remainder >> (i * EPT_TABLE_ORDER));
}
- if ( mfn_valid(mfn_x(mfn)) || p2m_allows_invalid_mfn(p2mt) )
+ if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
{
int emt = epte_get_entry_emt(p2m->domain, gfn, mfn,
i * EPT_TABLE_ORDER, &ipat, direct_mmio);
/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef mfn_valid
-#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
#undef page_to_mfn
#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef mfn_valid
-#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
#undef page_to_mfn
#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef mfn_valid
-#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
#undef page_to_mfn
#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef mfn_valid
-#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
#undef page_to_mfn
#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef mfn_valid
-#define mfn_valid(_mfn) __mfn_valid(mfn_x(_mfn))
#undef page_to_mfn
#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
{
struct page_info *page = mfn_to_page(mfn);
- if ( !mfn_valid(mfn) )
+ if ( !mfn_valid(_mfn(mfn)) )
continue;
if ( is_page_in_use(page) && !is_xen_heap_page(page) )
{
{
struct page_info *page = __mfn_to_page(mfn);
- if ( !mfn_valid(mfn) )
+ if ( !mfn_valid(_mfn(mfn)) )
continue;
if ( (mfn << PAGE_SHIFT) < __pa(&_end) )
continue; /* skip Xen */
l3e = l3t[l3_table_offset(addr)];
unmap_domain_page(l3t);
mfn = l3e_get_pfn(l3e);
- if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
+ if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(_mfn(mfn)) )
return NULL;
if ( (l3e_get_flags(l3e) & _PAGE_PSE) )
{
l2e = l2t[l2_table_offset(addr)];
unmap_domain_page(l2t);
mfn = l2e_get_pfn(l2e);
- if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
+ if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || !mfn_valid(_mfn(mfn)) )
return NULL;
if ( (l2e_get_flags(l2e) & _PAGE_PSE) )
{
l1e = l1t[l1_table_offset(addr)];
unmap_domain_page(l1t);
mfn = l1e_get_pfn(l1e);
- if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
+ if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(_mfn(mfn)) )
return NULL;
ret:
continue;
for ( n = 0; n < CNT; ++n)
- if ( mfn_valid(i + n * PDX_GROUP_COUNT) )
+ if ( mfn_valid(_mfn(i + n * PDX_GROUP_COUNT)) )
break;
if ( n == CNT )
continue;
va = RO_MPT_VIRT_START + i * sizeof(*machine_to_phys_mapping);
for ( n = 0; n < CNT; ++n)
- if ( mfn_valid(i + n * PDX_GROUP_COUNT) )
+ if ( mfn_valid(_mfn(i + n * PDX_GROUP_COUNT)) )
break;
if ( n < CNT )
{
for ( holes = k = 0; k < 1 << PAGETABLE_ORDER; ++k)
{
for ( n = 0; n < CNT; ++n)
- if ( mfn_valid(MFN(i + k) + n * PDX_GROUP_COUNT) )
+ if ( mfn_valid(_mfn(MFN(i + k) + n * PDX_GROUP_COUNT)) )
break;
if ( n == CNT )
++holes;
}
for ( n = 0; n < CNT; ++n)
- if ( mfn_valid(MFN(i) + n * PDX_GROUP_COUNT) )
+ if ( mfn_valid(_mfn(MFN(i) + n * PDX_GROUP_COUNT)) )
break;
if ( n == CNT )
l1_pg = NULL;
memflags = MEMF_node(phys_to_nid(i <<
(L2_PAGETABLE_SHIFT - 2 + PAGE_SHIFT)));
for ( n = 0; n < CNT; ++n)
- if ( mfn_valid(MFN(i) + n * PDX_GROUP_COUNT) )
+ if ( mfn_valid(_mfn(MFN(i) + n * PDX_GROUP_COUNT)) )
break;
if ( n == CNT )
continue;
l4e = l4t[l4_table_offset(addr)];
unmap_domain_page(l4t);
mfn = l4e_get_pfn(l4e);
- pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
+ pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ?
get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
printk(" L4[0x%03lx] = %"PRIpte" %016lx\n",
l4_table_offset(addr), l4e_get_intpte(l4e), pfn);
if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) ||
- !mfn_valid(mfn) )
+ !mfn_valid(_mfn(mfn)) )
return;
l3t = map_domain_page(_mfn(mfn));
l3e = l3t[l3_table_offset(addr)];
unmap_domain_page(l3t);
mfn = l3e_get_pfn(l3e);
- pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
+ pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ?
get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
printk(" L3[0x%03lx] = %"PRIpte" %016lx%s\n",
l3_table_offset(addr), l3e_get_intpte(l3e), pfn,
(l3e_get_flags(l3e) & _PAGE_PSE) ? " (PSE)" : "");
if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ||
(l3e_get_flags(l3e) & _PAGE_PSE) ||
- !mfn_valid(mfn) )
+ !mfn_valid(_mfn(mfn)) )
return;
l2t = map_domain_page(_mfn(mfn));
l2e = l2t[l2_table_offset(addr)];
unmap_domain_page(l2t);
mfn = l2e_get_pfn(l2e);
- pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
+ pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ?
get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
printk(" L2[0x%03lx] = %"PRIpte" %016lx %s\n",
l2_table_offset(addr), l2e_get_intpte(l2e), pfn,
(l2e_get_flags(l2e) & _PAGE_PSE) ? "(PSE)" : "");
if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
(l2e_get_flags(l2e) & _PAGE_PSE) ||
- !mfn_valid(mfn) )
+ !mfn_valid(_mfn(mfn)) )
return;
l1t = map_domain_page(_mfn(mfn));
l1e = l1t[l1_table_offset(addr)];
unmap_domain_page(l1t);
mfn = l1e_get_pfn(l1e);
- pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
+ pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ?
get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
printk(" L1[0x%03lx] = %"PRIpte" %016lx\n",
l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
*frame = page_to_mfn(*page);
#else
*frame = mfn_x(gfn_to_mfn(rd, _gfn(gfn)));
- *page = mfn_valid(*frame) ? mfn_to_page(*frame) : NULL;
+ *page = mfn_valid(_mfn(*frame)) ? mfn_to_page(*frame) : NULL;
if ( (!(*page)) || (!get_page(*page, rd)) )
{
*frame = mfn_x(INVALID_MFN);
/* pg may be set, with a refcount included, from __get_paged_frame */
if ( !pg )
{
- pg = mfn_valid(frame) ? mfn_to_page(frame) : NULL;
+ pg = mfn_valid(_mfn(frame)) ? mfn_to_page(frame) : NULL;
if ( pg )
owner = page_get_owner_and_reference(pg);
}
#endif
/* Check the passed page frame for basic validity. */
- if ( unlikely(!mfn_valid(mfn)) )
+ if ( unlikely(!mfn_valid(_mfn(mfn))) )
{
put_gfn(d, gop.mfn);
gdprintk(XENLOG_INFO, "gnttab_transfer: out-of-range %lx\n",
}
else
{
- ASSERT(mfn_valid(act->frame));
+ ASSERT(mfn_valid(_mfn(act->frame)));
*page = mfn_to_page(act->frame);
td = page_get_owner_and_reference(*page);
/*
d = rcu_lock_current_domain();
mfn = cflush->a.dev_bus_addr >> PAGE_SHIFT;
- if ( !mfn_valid(mfn) )
+ if ( !mfn_valid(_mfn(mfn)) )
{
rcu_unlock_domain(d);
return -EINVAL;
for ( j = 0; j < (1U << a->extent_order); j++, mfn++ )
{
- if ( !mfn_valid(mfn) )
+ if ( !mfn_valid(_mfn(mfn)) )
{
gdprintk(XENLOG_INFO, "Invalid mfn %#"PRI_xen_pfn"\n",
mfn);
* actual page that needs to be released. */
if ( p2mt == p2m_ram_paging_out )
{
- ASSERT(mfn_valid(mfn_x(mfn)));
+ ASSERT(mfn_valid(mfn));
page = mfn_to_page(mfn_x(mfn));
if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
put_page(page);
#else
mfn = gfn_to_mfn(d, _gfn(gmfn));
#endif
- if ( unlikely(!mfn_valid(mfn_x(mfn))) )
+ if ( unlikely(!mfn_valid(mfn)) )
{
put_gfn(d, gmfn);
gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
#else /* !CONFIG_X86 */
mfn = mfn_x(gfn_to_mfn(d, _gfn(gmfn + k)));
#endif
- if ( unlikely(!mfn_valid(mfn)) )
+ if ( unlikely(!mfn_valid(_mfn(mfn))) )
{
put_gfn(d, gmfn + k);
rc = -EINVAL;
if ( (page_to_mfn(pg) & mask) )
{
/* Merge with predecessor block? */
- if ( !mfn_valid(page_to_mfn(pg-mask)) ||
+ if ( !mfn_valid(_mfn(page_to_mfn(pg-mask))) ||
!page_state_is(pg-mask, free) ||
(PFN_ORDER(pg-mask) != order) ||
(phys_to_nid(page_to_maddr(pg-mask)) != node) )
else
{
/* Merge with successor block? */
- if ( !mfn_valid(page_to_mfn(pg+mask)) ||
+ if ( !mfn_valid(_mfn(page_to_mfn(pg+mask))) ||
!page_state_is(pg+mask, free) ||
(PFN_ORDER(pg+mask) != order) ||
(phys_to_nid(page_to_maddr(pg+mask)) != node) )
struct domain *owner;
struct page_info *pg;
- if ( !mfn_valid(mfn) )
+ if ( !mfn_valid(_mfn(mfn)) )
{
dprintk(XENLOG_WARNING,
"try to offline page out of range %lx\n", mfn);
struct page_info *pg;
int ret;
- if ( !mfn_valid(mfn) )
+ if ( !mfn_valid(_mfn(mfn)) )
{
dprintk(XENLOG_WARNING, "call expand_pages() first\n");
return -EINVAL;
{
struct page_info *pg;
- if ( !mfn_valid(mfn) || !page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL) )
+ if ( !mfn_valid(_mfn(mfn)) || !page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL) )
{
dprintk(XENLOG_WARNING, "call expand_pages() first\n");
return -EINVAL;
pg = mfn_to_page(mfn);
/* Check the mfn is valid and page is free. */
- if ( !mfn_valid(mfn) || !page_state_is(pg, free) )
+ if ( !mfn_valid(_mfn(mfn)) || !page_state_is(pg, free) )
continue;
scrub_one_page(pg);
unsigned long __read_mostly pdx_group_valid[BITS_TO_LONGS(
(FRAMETABLE_NR + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT)] = { [0] = 1 };
-int __mfn_valid(unsigned long mfn)
+bool __mfn_valid(unsigned long mfn)
{
return likely(mfn < max_page) &&
likely(!(mfn & pfn_hole_mask)) &&
mfn = guest_iommu_get_table_mfn(d, reg_to_u64(iommu->ppr_log.reg_base),
sizeof(ppr_entry_t), tail);
- ASSERT(mfn_valid(mfn));
+ ASSERT(mfn_valid(_mfn(mfn)));
log_base = map_domain_page(_mfn(mfn));
log = log_base + tail % (PAGE_SIZE / sizeof(ppr_entry_t));
mfn = guest_iommu_get_table_mfn(d, reg_to_u64(iommu->event_log.reg_base),
sizeof(event_entry_t), tail);
- ASSERT(mfn_valid(mfn));
+ ASSERT(mfn_valid(_mfn(mfn)));
log_base = map_domain_page(_mfn(mfn));
log = log_base + tail % (PAGE_SIZE / sizeof(event_entry_t));
dte_mfn = guest_iommu_get_table_mfn(d,
reg_to_u64(g_iommu->dev_table.reg_base),
sizeof(dev_entry_t), gbdf);
- ASSERT(mfn_valid(dte_mfn));
+ ASSERT(mfn_valid(_mfn(dte_mfn)));
/* Read guest dte information */
dte_base = map_domain_page(_mfn(dte_mfn));
gcr3_mfn = mfn_x(get_gfn(d, gcr3_gfn, &p2mt));
put_gfn(d, gcr3_gfn);
- ASSERT(mfn_valid(gcr3_mfn));
+ ASSERT(mfn_valid(_mfn(gcr3_mfn)));
iommu = find_iommu_for_device(0, mbdf);
if ( !iommu )
cmd_mfn = guest_iommu_get_table_mfn(d,
reg_to_u64(iommu->cmd_buffer.reg_base),
sizeof(cmd_entry_t), head);
- ASSERT(mfn_valid(cmd_mfn));
+ ASSERT(mfn_valid(_mfn(cmd_mfn)));
cmd_base = map_domain_page(_mfn(cmd_mfn));
cmd = cmd_base + head % entries_per_page;
* XXX Should we really map all non-RAM (above 4G)? Minimally
* a pfn_valid() check would seem desirable here.
*/
- if ( mfn_valid(pfn) )
+ if ( mfn_valid(_mfn(pfn)) )
{
int ret = amd_iommu_map_page(d, pfn, pfn,
IOMMUF_readable|IOMMUF_writable);
do
{
- if ( !mfn_valid(base) )
+ if ( !mfn_valid(_mfn(base)) )
{
printk(XENLOG_ERR VTDPREFIX
"Invalid pfn in RMRR range "ERMRRU_FMT"\n",
unsigned long pfn = pdx_to_pfn(i);
if ( pfn > (0xffffffffUL >> PAGE_SHIFT) ?
- (!mfn_valid(pfn) ||
+ (!mfn_valid(_mfn(pfn)) ||
!page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL)) :
iommu_inclusive_mapping ?
page_is_ram_type(pfn, RAM_TYPE_UNUSABLE) :
#else
#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
- (mfn_valid(mfn) && is_xen_heap_page(__mfn_to_page(mfn)))
+ (mfn_valid(_mfn(mfn)) && is_xen_heap_page(__mfn_to_page(mfn)))
#endif
#define is_xen_fixed_mfn(mfn) \
/* XXX -- account for base */
#define mfn_valid(mfn) ({ \
- unsigned long __m_f_n = (mfn); \
+ unsigned long __m_f_n = mfn_x(mfn); \
likely(pfn_to_pdx(__m_f_n) >= frametable_base_pdx && __mfn_valid(__m_f_n)); \
})
if ( !p2m_is_any_ram(p2mt) )
return NULL;
- if ( !mfn_valid(mfn) )
+ if ( !mfn_valid(_mfn(mfn)) )
return NULL;
page = mfn_to_page(mfn);
if (t)
*t = p2m_ram_rw;
page = __mfn_to_page(gfn);
- return mfn_valid(gfn) && get_page(page, d) ? page : NULL;
+ return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL;
}
* We define non-underscored wrappers for above conversion functions. These are
* overridden in various source files while underscored versions remain intact.
*/
-#define mfn_valid(mfn) __mfn_valid(mfn)
+#define mfn_valid(mfn) __mfn_valid(mfn_x(mfn))
#define virt_to_mfn(va) __virt_to_mfn(va)
#define mfn_to_virt(mfn) __mfn_to_virt(mfn)
#define virt_to_maddr(va) __virt_to_maddr((unsigned long)(va))
#define page_to_pdx(pg) ((pg) - frame_table)
#define pdx_to_page(pdx) (frame_table + (pdx))
-extern int __mfn_valid(unsigned long mfn);
+bool __mfn_valid(unsigned long mfn);
static inline unsigned long pfn_to_pdx(unsigned long pfn)
{
typedef uint32_t pagesize_t; /* like size_t, must handle largest PAGE_SIZE */
#define IS_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
-#define IS_VALID_PAGE(_pi) ( mfn_valid(page_to_mfn(_pi)) )
+#define IS_VALID_PAGE(_pi) mfn_valid(_mfn(page_to_mfn(_pi)))
extern struct page_list_head tmem_page_list;
extern spinlock_t tmem_page_list_lock;