return apply_p2m_changes(d, RELINQUISH,
pfn_to_paddr(p2m->lowest_mapped_gfn),
pfn_to_paddr(p2m->max_mapped_gfn),
- pfn_to_paddr(INVALID_MFN),
+ pfn_to_paddr(mfn_x(INVALID_MFN)),
MATTR_MEM, 0, p2m_invalid,
d->arch.p2m.default_access);
}
return apply_p2m_changes(d, CACHEFLUSH,
pfn_to_paddr(start_mfn),
pfn_to_paddr(end_mfn),
- pfn_to_paddr(INVALID_MFN),
+ pfn_to_paddr(mfn_x(INVALID_MFN)),
MATTR_MEM, 0, p2m_invalid,
d->arch.p2m.default_access);
}
gfn = PFN_DOWN(gaddr);
mfn = mfn_x(get_gfn(d, gfn, &t));
- if ( mfn == INVALID_MFN )
+ if ( mfn == mfn_x(INVALID_MFN) )
{
put_gfn(d, gfn);
put_domain(d);
typedef unsigned char dbgbyte_t;
/* Returns: mfn for the given (hvm guest) vaddr */
-static unsigned long
+static mfn_t
dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr,
unsigned long *gfn)
{
- unsigned long mfn;
+ mfn_t mfn;
uint32_t pfec = PFEC_page_present;
p2m_type_t gfntype;
return INVALID_MFN;
}
- mfn = mfn_x(get_gfn(dp, *gfn, &gfntype));
+ mfn = get_gfn(dp, *gfn, &gfntype);
if ( p2m_is_readonly(gfntype) && toaddr )
{
DBGP2("kdb:p2m_is_readonly: gfntype:%x\n", gfntype);
mfn = INVALID_MFN;
}
else
- DBGP2("X: vaddr:%lx domid:%d mfn:%lx\n", vaddr, dp->domain_id, mfn);
+ DBGP2("X: vaddr:%lx domid:%d mfn:%#"PRI_mfn"\n",
+ vaddr, dp->domain_id, mfn_x(mfn));
- if ( mfn == INVALID_MFN )
+ if ( mfn_eq(mfn, INVALID_MFN) )
{
put_gfn(dp, *gfn);
*gfn = INVALID_GFN;
* mode.
* Returns: mfn for the given (pv guest) vaddr
*/
-static unsigned long
+static mfn_t
dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
{
l4_pgentry_t l4e, *l4t;
l2_pgentry_t l2e, *l2t;
l1_pgentry_t l1e, *l1t;
unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
- unsigned long mfn = cr3 >> PAGE_SHIFT;
+ mfn_t mfn = _mfn(cr3 >> PAGE_SHIFT);
DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id,
cr3, pgd3val);
if ( pgd3val == 0 )
{
- l4t = map_domain_page(_mfn(mfn));
+ l4t = map_domain_page(mfn);
l4e = l4t[l4_table_offset(vaddr)];
unmap_domain_page(l4t);
- mfn = l4e_get_pfn(l4e);
- DBGP2("l4t:%p l4to:%lx l4e:%lx mfn:%lx\n", l4t,
- l4_table_offset(vaddr), l4e, mfn);
+ mfn = _mfn(l4e_get_pfn(l4e));
+ DBGP2("l4t:%p l4to:%lx l4e:%lx mfn:%#"PRI_mfn"\n", l4t,
+ l4_table_offset(vaddr), l4e, mfn_x(mfn));
if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
{
DBGP1("l4 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
return INVALID_MFN;
}
- l3t = map_domain_page(_mfn(mfn));
+ l3t = map_domain_page(mfn);
l3e = l3t[l3_table_offset(vaddr)];
unmap_domain_page(l3t);
- mfn = l3e_get_pfn(l3e);
- DBGP2("l3t:%p l3to:%lx l3e:%lx mfn:%lx\n", l3t,
- l3_table_offset(vaddr), l3e, mfn);
+ mfn = _mfn(l3e_get_pfn(l3e));
+ DBGP2("l3t:%p l3to:%lx l3e:%lx mfn:%#"PRI_mfn"\n", l3t,
+ l3_table_offset(vaddr), l3e, mfn_x(mfn));
if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ||
(l3e_get_flags(l3e) & _PAGE_PSE) )
{
}
}
- l2t = map_domain_page(_mfn(mfn));
+ l2t = map_domain_page(mfn);
l2e = l2t[l2_table_offset(vaddr)];
unmap_domain_page(l2t);
- mfn = l2e_get_pfn(l2e);
- DBGP2("l2t:%p l2to:%lx l2e:%lx mfn:%lx\n", l2t, l2_table_offset(vaddr),
- l2e, mfn);
+ mfn = _mfn(l2e_get_pfn(l2e));
+ DBGP2("l2t:%p l2to:%lx l2e:%lx mfn:%#"PRI_mfn"\n",
+ l2t, l2_table_offset(vaddr), l2e, mfn_x(mfn));
if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
(l2e_get_flags(l2e) & _PAGE_PSE) )
{
DBGP1("l2 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
return INVALID_MFN;
}
- l1t = map_domain_page(_mfn(mfn));
+ l1t = map_domain_page(mfn);
l1e = l1t[l1_table_offset(vaddr)];
unmap_domain_page(l1t);
- mfn = l1e_get_pfn(l1e);
- DBGP2("l1t:%p l1to:%lx l1e:%lx mfn:%lx\n", l1t, l1_table_offset(vaddr),
- l1e, mfn);
+ mfn = _mfn(l1e_get_pfn(l1e));
+ DBGP2("l1t:%p l1to:%lx l1e:%lx mfn:%#"PRI_mfn"\n", l1t, l1_table_offset(vaddr),
+ l1e, mfn_x(mfn));
- return mfn_valid(mfn) ? mfn : INVALID_MFN;
+ return mfn_valid(mfn_x(mfn)) ? mfn : INVALID_MFN;
}
/* Returns: number of bytes remaining to be copied */
{
char *va;
unsigned long addr = (unsigned long)gaddr;
- unsigned long mfn, gfn = INVALID_GFN, pagecnt;
+ mfn_t mfn;
+ unsigned long gfn = INVALID_GFN, pagecnt;
pagecnt = min_t(long, PAGE_SIZE - (addr & ~PAGE_MASK), len);
mfn = (has_hvm_container_domain(dp)
? dbg_hvm_va2mfn(addr, dp, toaddr, &gfn)
: dbg_pv_va2mfn(addr, dp, pgd3));
- if ( mfn == INVALID_MFN )
+ if ( mfn_eq(mfn, INVALID_MFN) )
break;
- va = map_domain_page(_mfn(mfn));
+ va = map_domain_page(mfn);
va = va + (addr & (PAGE_SIZE-1));
if ( toaddr )
{
copy_from_user(va, buf, pagecnt); /* va = buf */
- paging_mark_dirty(dp, mfn);
+ paging_mark_dirty(dp, mfn_x(mfn));
}
else
{
p2m = hostp2m;
/* Check access permissions first, then handle faults */
- if ( mfn_x(mfn) != INVALID_MFN )
+ if ( !mfn_eq(mfn, INVALID_MFN) )
{
bool_t violation;
rc = -EINVAL;
if ( (gfn_x(vcpu_altp2m(curr).veinfo_gfn) != INVALID_GFN) ||
- (mfn_x(get_gfn_query_unlocked(curr->domain,
- a.u.enable_notify.gfn, &p2mt)) == INVALID_MFN) )
+ mfn_eq(get_gfn_query_unlocked(curr->domain,
+ a.u.enable_notify.gfn, &p2mt), INVALID_MFN) )
return -EINVAL;
vcpu_altp2m(curr).veinfo_gfn = _gfn(a.u.enable_notify.gfn);
{
if ( page )
put_page(page);
- gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn,
- page ? page_to_mfn(page) : INVALID_MFN);
+ gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
+ gmfn, page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
return;
}
return;
fail:
- gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn,
- page ? page_to_mfn(page) : INVALID_MFN);
+ gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", gmfn,
+ page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
}
static void teardown_apic_assist(struct vcpu *v)
{
if ( page )
put_page(page);
- gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn,
- page ? page_to_mfn(page) : INVALID_MFN);
+ gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
+ gmfn, page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
return;
}
mfn = get_gfn_query_unlocked(d, gfn_x(vcpu_altp2m(v).veinfo_gfn), &t);
- if ( mfn_x(mfn) != INVALID_MFN )
+ if ( !mfn_eq(mfn, INVALID_MFN) )
__vmwrite(VIRT_EXCEPTION_INFO, mfn_x(mfn) << PAGE_SHIFT);
else
v->arch.hvm_vmx.secondary_exec_control &=
start = _gfn((gfn_x(start) & ~GUEST_L3_GFN_MASK) +
((va >> PAGE_SHIFT) & GUEST_L3_GFN_MASK));
gw->l1e = guest_l1e_from_gfn(start, flags);
- gw->l2mfn = gw->l1mfn = _mfn(INVALID_MFN);
+ gw->l2mfn = gw->l1mfn = INVALID_MFN;
goto set_ad;
}
start = _gfn((gfn_x(start) & ~GUEST_L2_GFN_MASK) +
guest_l1_table_offset(va));
gw->l1e = guest_l1e_from_gfn(start, flags);
- gw->l1mfn = _mfn(INVALID_MFN);
+ gw->l1mfn = INVALID_MFN;
}
else
{
oom:
HAP_ERROR("out of memory building monitor pagetable\n");
domain_crash(d);
- return _mfn(INVALID_MFN);
+ return INVALID_MFN;
}
static void hap_destroy_monitor_table(struct vcpu* v, mfn_t mmfn)
}
for ( i = 0; i < MAX_EPTP; i++ )
- d->arch.altp2m_eptp[i] = INVALID_MFN;
+ d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
for ( i = 0; i < MAX_ALTP2M; i++ )
{
int level)
{
int rc;
- unsigned long oldmfn = INVALID_MFN;
+ unsigned long oldmfn = mfn_x(INVALID_MFN);
bool_t check_foreign = (new.mfn != entryptr->mfn ||
new.sa_p2mt != entryptr->sa_p2mt);
write_atomic(&entryptr->epte, new.epte);
- if ( unlikely(oldmfn != INVALID_MFN) )
+ if ( unlikely(oldmfn != mfn_x(INVALID_MFN)) )
put_page(mfn_to_page(oldmfn));
rc = 0;
int i;
int ret = 0;
bool_t recalc = 0;
- mfn_t mfn = _mfn(INVALID_MFN);
+ mfn_t mfn = INVALID_MFN;
struct ept_data *ept = &p2m->ept;
*t = p2m_mmio_dm;
{
/* All PoD: Mark the whole region invalid and tell caller
* we're done. */
- p2m_set_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid,
+ p2m_set_entry(p2m, gpfn, INVALID_MFN, order, p2m_invalid,
p2m->default_access);
p2m->pod.entry_count-=(1<<order);
BUG_ON(p2m->pod.entry_count < 0);
n = 1UL << cur_order;
if ( t == p2m_populate_on_demand )
{
- p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
+ p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
p2m_invalid, p2m->default_access);
p2m->pod.entry_count -= n;
BUG_ON(p2m->pod.entry_count < 0);
page = mfn_to_page(mfn);
- p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
+ p2m_set_entry(p2m, gpfn + i, INVALID_MFN, cur_order,
p2m_invalid, p2m->default_access);
p2m_tlb_flush_sync(p2m);
for ( j = 0; j < n; ++j )
static int
p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
{
- mfn_t mfn, mfn0 = _mfn(INVALID_MFN);
+ mfn_t mfn, mfn0 = INVALID_MFN;
p2m_type_t type, type0 = 0;
unsigned long * map = NULL;
int ret=0, reset = 0;
}
/* Try to remove the page, restoring old mapping if it fails. */
- p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_2M,
+ p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
p2m_populate_on_demand, p2m->default_access);
p2m_tlb_flush_sync(p2m);
}
/* Try to remove the page, restoring old mapping if it fails. */
- p2m_set_entry(p2m, gfns[i], _mfn(INVALID_MFN), PAGE_ORDER_4K,
+ p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
p2m_populate_on_demand, p2m->default_access);
/* See if the page was successfully unmapped. (Allow one refcount
* NOTE: In a fine-grained p2m locking scenario this operation
* may need to promote its locking from gfn->1g superpage
*/
- p2m_set_entry(p2m, gfn_aligned, _mfn(INVALID_MFN), PAGE_ORDER_2M,
+ p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
p2m_populate_on_demand, p2m->default_access);
return 0;
}
* need promoting the gfn lock from gfn->2M superpage */
gfn_aligned = (gfn>>order)<<order;
for(i=0; i<(1<<order); i++)
- p2m_set_entry(p2m, gfn_aligned + i, _mfn(INVALID_MFN), PAGE_ORDER_4K,
+ p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN, PAGE_ORDER_4K,
p2m_populate_on_demand, p2m->default_access);
if ( tb_init_done )
{
}
/* Now, actually do the two-way mapping */
- rc = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), order,
+ rc = p2m_set_entry(p2m, gfn, INVALID_MFN, order,
p2m_populate_on_demand, p2m->default_access);
if ( rc == 0 )
{
* the intermediate one might be).
*/
unsigned int flags, iommu_old_flags = 0;
- unsigned long old_mfn = INVALID_MFN;
+ unsigned long old_mfn = mfn_x(INVALID_MFN);
ASSERT(sve != 0);
p2m->max_mapped_pfn )
break;
}
- return _mfn(INVALID_MFN);
+ return INVALID_MFN;
}
mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
if ( (l4e_get_flags(*l4e) & _PAGE_PRESENT) == 0 )
{
unmap_domain_page(l4e);
- return _mfn(INVALID_MFN);
+ return INVALID_MFN;
}
mfn = _mfn(l4e_get_pfn(*l4e));
recalc = needs_recalc(l4, *l4e);
*t = p2m_populate_on_demand;
}
unmap_domain_page(l3e);
- return _mfn(INVALID_MFN);
+ return INVALID_MFN;
}
if ( flags & _PAGE_PSE )
{
unmap_domain_page(l3e);
ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
- return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN);
+ return (p2m_is_valid(*t)) ? mfn : INVALID_MFN;
}
mfn = _mfn(l3e_get_pfn(*l3e));
}
unmap_domain_page(l2e);
- return _mfn(INVALID_MFN);
+ return INVALID_MFN;
}
if ( flags & _PAGE_PSE )
{
unmap_domain_page(l2e);
ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
- return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN);
+ return (p2m_is_valid(*t)) ? mfn : INVALID_MFN;
}
mfn = _mfn(l2e_get_pfn(*l2e));
}
unmap_domain_page(l1e);
- return _mfn(INVALID_MFN);
+ return INVALID_MFN;
}
mfn = _mfn(l1e_get_pfn(*l1e));
*t = recalc_type(recalc || _needs_recalc(flags), l1t, p2m, gfn);
unmap_domain_page(l1e);
ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t) || p2m_is_paging(*t));
- return (p2m_is_valid(*t) || p2m_is_grant(*t)) ? mfn : _mfn(INVALID_MFN);
+ return (p2m_is_valid(*t) || p2m_is_grant(*t)) ? mfn : INVALID_MFN;
}
static void p2m_pt_change_entry_type_global(struct p2m_domain *p2m,
if (unlikely((p2m_is_broken(*t))))
{
/* Return invalid_mfn to avoid caller's access */
- mfn = _mfn(INVALID_MFN);
+ mfn = INVALID_MFN;
if ( q & P2M_ALLOC )
domain_crash(p2m->domain);
}
rc = set_rc;
gfn += 1ul << order;
- if ( mfn_x(mfn) != INVALID_MFN )
- mfn = _mfn(mfn_x(mfn) + (1ul << order));
+ if ( !mfn_eq(mfn, INVALID_MFN) )
+ mfn = mfn_add(mfn, 1ul << order);
todo -= 1ul << order;
}
/* Initialise physmap tables for slot zero. Other code assumes this. */
p2m->defer_nested_flush = 1;
- rc = p2m_set_entry(p2m, 0, _mfn(INVALID_MFN), PAGE_ORDER_4K,
+ rc = p2m_set_entry(p2m, 0, INVALID_MFN, PAGE_ORDER_4K,
p2m_invalid, p2m->default_access);
p2m->defer_nested_flush = 0;
p2m_unlock(p2m);
ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
}
}
- return p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid,
+ return p2m_set_entry(p2m, gfn, INVALID_MFN, page_order, p2m_invalid,
p2m->default_access);
}
{
gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
gfn_x(gfn), mfn_x(mfn));
- rc = p2m_set_entry(p2m, gfn_x(gfn), _mfn(INVALID_MFN), page_order,
+ rc = p2m_set_entry(p2m, gfn_x(gfn), INVALID_MFN, page_order,
p2m_invalid, p2m->default_access);
if ( rc == 0 )
{
}
/* Do not use mfn_valid() here as it will usually fail for MMIO pages. */
- if ( (INVALID_MFN == mfn_x(actual_mfn)) || (t != p2m_mmio_direct) )
+ if ( mfn_eq(actual_mfn, INVALID_MFN) || (t != p2m_mmio_direct) )
{
gdprintk(XENLOG_ERR,
"gfn_to_mfn failed! gfn=%08lx type:%d\n", gfn, t);
gdprintk(XENLOG_WARNING,
"no mapping between mfn %08lx and gfn %08lx\n",
mfn_x(mfn), gfn);
- rc = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), order, p2m_invalid,
+ rc = p2m_set_entry(p2m, gfn, INVALID_MFN, order, p2m_invalid,
p2m->default_access);
out:
mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
if ( p2mt == p2m_mmio_direct && mfn_x(mfn) == gfn )
{
- ret = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K,
+ ret = p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K,
p2m_invalid, p2m->default_access);
gfn_unlock(p2m, gfn, 0);
}
put_page(page);
/* Remove mapping from p2m table */
- ret = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K,
+ ret = p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K,
p2m_ram_paged, a);
/* Clear content before returning the page to Xen */
if ( altp2m_idx )
{
if ( altp2m_idx >= MAX_ALTP2M ||
- d->arch.altp2m_eptp[altp2m_idx] == INVALID_MFN )
+ d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
return -EINVAL;
ap2m = d->arch.altp2m_p2m[altp2m_idx];
mfn = p2m->get_entry(p2m, gfn_x(gfn), &t, &a, 0, NULL, NULL);
gfn_unlock(p2m, gfn, 0);
- if ( mfn_x(mfn) == INVALID_MFN )
+ if ( mfn_eq(mfn, INVALID_MFN) )
return -ESRCH;
if ( (unsigned) a >= ARRAY_SIZE(memaccess) )
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- if ( d->arch.altp2m_eptp[i] == INVALID_MFN )
+ if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
continue;
p2m = d->arch.altp2m_p2m[i];
altp2m_list_lock(d);
- if ( d->arch.altp2m_eptp[idx] != INVALID_MFN )
+ if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
{
if ( idx != vcpu_altp2m(v).p2midx )
{
0, &page_order);
__put_gfn(*ap2m, gfn_x(gfn));
- if ( mfn_x(mfn) != INVALID_MFN )
+ if ( !mfn_eq(mfn, INVALID_MFN) )
return 0;
mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma,
P2M_ALLOC | P2M_UNSHARE, &page_order);
__put_gfn(hp2m, gfn_x(gfn));
- if ( mfn_x(mfn) == INVALID_MFN )
+ if ( mfn_eq(mfn, INVALID_MFN) )
return 0;
p2m_lock(*ap2m);
/* Uninit and reinit ept to force TLB shootdown */
ept_p2m_uninit(d->arch.altp2m_p2m[i]);
ept_p2m_init(d->arch.altp2m_p2m[i]);
- d->arch.altp2m_eptp[i] = INVALID_MFN;
+ d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
}
altp2m_list_unlock(d);
altp2m_list_lock(d);
- if ( d->arch.altp2m_eptp[idx] == INVALID_MFN )
+ if ( d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
{
p2m_init_altp2m_helper(d, idx);
rc = 0;
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- if ( d->arch.altp2m_eptp[i] != INVALID_MFN )
+ if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
continue;
p2m_init_altp2m_helper(d, i);
altp2m_list_lock(d);
- if ( d->arch.altp2m_eptp[idx] != INVALID_MFN )
+ if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
{
p2m = d->arch.altp2m_p2m[idx];
/* Uninit and reinit ept to force TLB shootdown */
ept_p2m_uninit(d->arch.altp2m_p2m[idx]);
ept_p2m_init(d->arch.altp2m_p2m[idx]);
- d->arch.altp2m_eptp[idx] = INVALID_MFN;
+ d->arch.altp2m_eptp[idx] = mfn_x(INVALID_MFN);
rc = 0;
}
}
altp2m_list_lock(d);
- if ( d->arch.altp2m_eptp[idx] != INVALID_MFN )
+ if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
{
for_each_vcpu( d, v )
if ( idx != vcpu_altp2m(v).p2midx )
unsigned int page_order;
int rc = -EINVAL;
- if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == INVALID_MFN )
+ if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
return rc;
hp2m = p2m_get_hostp2m(d);
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- if ( d->arch.altp2m_eptp[i] == INVALID_MFN )
+ if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
continue;
p2m = d->arch.altp2m_p2m[i];
m = get_gfn_type_access(p2m, gfn_x(gfn), &t, &a, 0, NULL);
/* Check for a dropped page that may impact this altp2m */
- if ( mfn_x(mfn) == INVALID_MFN &&
+ if ( mfn_eq(mfn, INVALID_MFN) &&
gfn_x(gfn) >= p2m->min_remapped_gfn &&
gfn_x(gfn) <= p2m->max_remapped_gfn )
{
for ( i = 0; i < MAX_ALTP2M; i++ )
{
if ( i == last_reset_idx ||
- d->arch.altp2m_eptp[i] == INVALID_MFN )
+ d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
continue;
p2m = d->arch.altp2m_p2m[i];
goto out;
}
}
- else if ( mfn_x(m) != INVALID_MFN )
+ else if ( !mfn_eq(m, INVALID_MFN) )
p2m_set_entry(p2m, gfn_x(gfn), mfn, page_order, p2mt, p2ma);
__put_gfn(p2m, gfn_x(gfn));
if ( unlikely(page == NULL) )
{
d->arch.paging.log_dirty.failed_allocs++;
- return _mfn(INVALID_MFN);
+ return INVALID_MFN;
}
d->arch.paging.log_dirty.allocs++;
int i;
mfn_t *node = map_domain_page(mfn);
for ( i = 0; i < LOGDIRTY_NODE_ENTRIES; i++ )
- node[i] = _mfn(INVALID_MFN);
+ node[i] = INVALID_MFN;
unmap_domain_page(node);
}
return mfn;
unmap_domain_page(l2);
paging_free_log_dirty_page(d, l3[i3]);
- l3[i3] = _mfn(INVALID_MFN);
+ l3[i3] = INVALID_MFN;
if ( i3 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
{
if ( rc )
break;
paging_free_log_dirty_page(d, l4[i4]);
- l4[i4] = _mfn(INVALID_MFN);
+ l4[i4] = INVALID_MFN;
if ( i4 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
{
if ( !rc )
{
paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
- d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
+ d->arch.paging.log_dirty.top = INVALID_MFN;
ASSERT(d->arch.paging.log_dirty.allocs == 0);
d->arch.paging.log_dirty.failed_allocs = 0;
/* This must be initialized separately from the rest of the
* log-dirty init code as that can be called more than once and we
* don't want to leak any active log-dirty bitmaps */
- d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
+ d->arch.paging.log_dirty.top = INVALID_MFN;
/*
* Shadow pagetables are the default, but we will use
for ( i = 0; i < SHADOW_OOS_PAGES; i++ )
{
- v->arch.paging.shadow.oos[i] = _mfn(INVALID_MFN);
- v->arch.paging.shadow.oos_snapshot[i] = _mfn(INVALID_MFN);
+ v->arch.paging.shadow.oos[i] = INVALID_MFN;
+ v->arch.paging.shadow.oos_snapshot[i] = INVALID_MFN;
for ( j = 0; j < SHADOW_OOS_FIXUPS; j++ )
- v->arch.paging.shadow.oos_fixup[i].smfn[j] = _mfn(INVALID_MFN);
+ v->arch.paging.shadow.oos_fixup[i].smfn[j] = INVALID_MFN;
}
#endif
int i;
for ( i = 0; i < SHADOW_OOS_FIXUPS; i++ )
{
- if ( mfn_x(fixup->smfn[i]) != INVALID_MFN )
+ if ( !mfn_eq(fixup->smfn[i], INVALID_MFN) )
{
sh_remove_write_access_from_sl1p(d, gmfn,
fixup->smfn[i],
fixup->off[i]);
- fixup->smfn[i] = _mfn(INVALID_MFN);
+ fixup->smfn[i] = INVALID_MFN;
}
}
next = oos_fixup[idx].next;
- if ( mfn_x(oos_fixup[idx].smfn[next]) != INVALID_MFN )
+ if ( !mfn_eq(oos_fixup[idx].smfn[next], INVALID_MFN) )
{
TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_OOS_FIXUP_EVICT);
struct oos_fixup fixup = { .next = 0 };
for (i = 0; i < SHADOW_OOS_FIXUPS; i++ )
- fixup.smfn[i] = _mfn(INVALID_MFN);
+ fixup.smfn[i] = INVALID_MFN;
idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
oidx = idx;
idx = (idx + 1) % SHADOW_OOS_PAGES;
if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
{
- oos[idx] = _mfn(INVALID_MFN);
+ oos[idx] = INVALID_MFN;
return;
}
}
SHADOW_ERROR("gmfn %lx was OOS but not in hash table\n", mfn_x(gmfn));
BUG();
- return _mfn(INVALID_MFN);
}
/* Pull a single guest page back into sync */
if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
{
_sh_resync(v, gmfn, &oos_fixup[idx], oos_snapshot[idx]);
- oos[idx] = _mfn(INVALID_MFN);
+ oos[idx] = INVALID_MFN;
return;
}
}
{
/* Write-protect and sync contents */
_sh_resync(v, oos[idx], &oos_fixup[idx], oos_snapshot[idx]);
- oos[idx] = _mfn(INVALID_MFN);
+ oos[idx] = INVALID_MFN;
}
resync_others:
{
/* Write-protect and sync contents */
_sh_resync(other, oos[idx], &oos_fixup[idx], oos_snapshot[idx]);
- oos[idx] = _mfn(INVALID_MFN);
+ oos[idx] = INVALID_MFN;
}
}
}
if ( likely(((vaddr + bytes - 1) & PAGE_MASK) == (vaddr & PAGE_MASK)) )
{
/* Whole write fits on a single page. */
- sh_ctxt->mfn[1] = _mfn(INVALID_MFN);
+ sh_ctxt->mfn[1] = INVALID_MFN;
map = map_domain_page(sh_ctxt->mfn[0]) + (vaddr & ~PAGE_MASK);
}
else if ( !is_hvm_domain(d) )
}
perfc_incr(shadow_hash_lookup_miss);
- return _mfn(INVALID_MFN);
+ return INVALID_MFN;
}
void shadow_hash_insert(struct domain *d, unsigned long n, unsigned int t,
};
static const unsigned int callback_mask = SHF_L3_64;
- hash_vcpu_foreach(v, callback_mask, callbacks, _mfn(INVALID_MFN));
+ hash_vcpu_foreach(v, callback_mask, callbacks, INVALID_MFN);
}
#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
- if ( mfn_x(v->arch.paging.shadow.oos_snapshot[0]) == INVALID_MFN )
+ if ( mfn_eq(v->arch.paging.shadow.oos_snapshot[0], INVALID_MFN) )
{
int i;
for(i = 0; i < SHADOW_OOS_PAGES; i++)
if ( mfn_valid(oos_snapshot[i]) )
{
shadow_free(d, oos_snapshot[i]);
- oos_snapshot[i] = _mfn(INVALID_MFN);
+ oos_snapshot[i] = INVALID_MFN;
}
}
#endif /* OOS */
if ( mfn_valid(oos_snapshot[i]) )
{
shadow_free(d, oos_snapshot[i]);
- oos_snapshot[i] = _mfn(INVALID_MFN);
+ oos_snapshot[i] = INVALID_MFN;
}
}
#endif /* OOS */
memcpy(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size);
else
{
- unsigned long map_mfn = INVALID_MFN;
+ unsigned long map_mfn = mfn_x(INVALID_MFN);
void *map_sl1p = NULL;
/* Iterate over VRAM to track dirty bits. */
int dirty = 0;
paddr_t sl1ma = dirty_vram->sl1ma[i];
- if (mfn_x(mfn) == INVALID_MFN)
+ if ( mfn_eq(mfn, INVALID_MFN) )
{
dirty = 1;
}
for ( i = begin_pfn; i < end_pfn; i++ )
{
mfn_t mfn = get_gfn_query_unlocked(d, i, &t);
- if ( mfn_x(mfn) != INVALID_MFN )
+ if ( !mfn_eq(mfn, INVALID_MFN) )
flush_tlb |= sh_remove_write_access(d, mfn, 1, 0);
}
dirty_vram->last_dirty = -1;
}
}
- hash_vcpu_foreach(v, mask, callbacks, _mfn(INVALID_MFN));
+ hash_vcpu_foreach(v, mask, callbacks, INVALID_MFN);
}
#endif /* Shadow audit */
{
return guest_walk_tables(v, p2m_get_hostp2m(v->domain), va, gw, pfec,
#if GUEST_PAGING_LEVELS == 3 /* PAE */
- _mfn(INVALID_MFN),
+ INVALID_MFN,
v->arch.paging.shadow.gl3e
#else /* 32 or 64 */
pagetable_get_mfn(v->arch.guest_table),
if ( mfn_valid(gw->l4mfn)
&& mfn_valid((smfn = get_shadow_status(d, gw->l4mfn,
SH_type_l4_shadow))) )
- (void) sh_audit_l4_table(v, smfn, _mfn(INVALID_MFN));
+ (void) sh_audit_l4_table(v, smfn, INVALID_MFN);
if ( mfn_valid(gw->l3mfn)
&& mfn_valid((smfn = get_shadow_status(d, gw->l3mfn,
SH_type_l3_shadow))) )
- (void) sh_audit_l3_table(v, smfn, _mfn(INVALID_MFN));
+ (void) sh_audit_l3_table(v, smfn, INVALID_MFN);
#endif /* PAE or 64... */
if ( mfn_valid(gw->l2mfn) )
{
if ( mfn_valid((smfn = get_shadow_status(d, gw->l2mfn,
SH_type_l2_shadow))) )
- (void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
+ (void) sh_audit_l2_table(v, smfn, INVALID_MFN);
#if GUEST_PAGING_LEVELS == 3
if ( mfn_valid((smfn = get_shadow_status(d, gw->l2mfn,
SH_type_l2h_shadow))) )
- (void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
+ (void) sh_audit_l2_table(v, smfn, INVALID_MFN);
#endif
}
if ( mfn_valid(gw->l1mfn)
&& mfn_valid((smfn = get_shadow_status(d, gw->l1mfn,
SH_type_l1_shadow))) )
- (void) sh_audit_l1_table(v, smfn, _mfn(INVALID_MFN));
+ (void) sh_audit_l1_table(v, smfn, INVALID_MFN);
else if ( (guest_l2e_get_flags(gw->l2e) & _PAGE_PRESENT)
&& (guest_l2e_get_flags(gw->l2e) & _PAGE_PSE)
&& mfn_valid(
(smfn = get_fl1_shadow_status(d, guest_l2e_get_gfn(gw->l2e)))) )
- (void) sh_audit_fl1_table(v, smfn, _mfn(INVALID_MFN));
+ (void) sh_audit_fl1_table(v, smfn, INVALID_MFN);
}
#else
{
#if GUEST_PAGING_LEVELS >= 4 /* 64bit... */
struct domain *d = v->domain;
- mfn_t sl3mfn = _mfn(INVALID_MFN);
+ mfn_t sl3mfn = INVALID_MFN;
shadow_l3e_t *sl3e;
if ( !mfn_valid(gw->l2mfn) ) return NULL; /* No guest page. */
/* Get the l3e */
shadow_l4e_t new_sl4e;
guest_l4e_t new_gl4e = *(guest_l4e_t *)new_ge;
shadow_l4e_t *sl4p = se;
- mfn_t sl3mfn = _mfn(INVALID_MFN);
+ mfn_t sl3mfn = INVALID_MFN;
struct domain *d = v->domain;
p2m_type_t p2mt;
int result = 0;
shadow_l3e_t new_sl3e;
guest_l3e_t new_gl3e = *(guest_l3e_t *)new_ge;
shadow_l3e_t *sl3p = se;
- mfn_t sl2mfn = _mfn(INVALID_MFN);
+ mfn_t sl2mfn = INVALID_MFN;
p2m_type_t p2mt;
int result = 0;
shadow_l2e_t new_sl2e;
guest_l2e_t new_gl2e = *(guest_l2e_t *)new_ge;
shadow_l2e_t *sl2p = se;
- mfn_t sl1mfn = _mfn(INVALID_MFN);
+ mfn_t sl1mfn = INVALID_MFN;
p2m_type_t p2mt;
int result = 0;
static inline void reset_early_unshadow(struct vcpu *v)
{
#if SHADOW_OPTIMIZATIONS & SHOPT_EARLY_UNSHADOW
- v->arch.paging.shadow.last_emulated_mfn_for_unshadow = INVALID_MFN;
+ v->arch.paging.shadow.last_emulated_mfn_for_unshadow = mfn_x(INVALID_MFN);
#endif
}
? SH_type_l2h_shadow
: SH_type_l2_shadow);
else
- sh_set_toplevel_shadow(v, i, _mfn(INVALID_MFN), 0);
+ sh_set_toplevel_shadow(v, i, INVALID_MFN, 0);
}
else
- sh_set_toplevel_shadow(v, i, _mfn(INVALID_MFN), 0);
+ sh_set_toplevel_shadow(v, i, INVALID_MFN, 0);
}
}
#elif GUEST_PAGING_LEVELS == 4
if ( fast_path ) {
if ( pagetable_is_null(v->arch.shadow_table[i]) )
- smfn = _mfn(INVALID_MFN);
+ smfn = INVALID_MFN;
else
smfn = _mfn(pagetable_get_pfn(v->arch.shadow_table[i]));
}
/* retrieving the l2s */
gmfn = get_gfn_query_unlocked(d, gfn_x(guest_l3e_get_gfn(gl3e[i])),
&p2mt);
- smfn = unlikely(mfn_x(gmfn) == INVALID_MFN)
- ? _mfn(INVALID_MFN)
+ smfn = unlikely(mfn_eq(gmfn, INVALID_MFN))
+ ? INVALID_MFN
: shadow_hash_lookup(d, mfn_x(gmfn), SH_type_l2_pae_shadow);
}
{
guest_l1e_t *gl1e, e;
shadow_l1e_t *sl1e;
- mfn_t gl1mfn = _mfn(INVALID_MFN);
+ mfn_t gl1mfn = INVALID_MFN;
int f;
int done = 0;
v->vcpu_info = ((v->vcpu_id < XEN_LEGACY_MAX_VCPUS)
? (vcpu_info_t *)&shared_info(d, vcpu_info[v->vcpu_id])
: &dummy_vcpu_info);
- v->vcpu_info_mfn = INVALID_MFN;
+ v->vcpu_info_mfn = mfn_x(INVALID_MFN);
}
struct vcpu *alloc_vcpu(
if ( offset > (PAGE_SIZE - sizeof(vcpu_info_t)) )
return -EINVAL;
- if ( v->vcpu_info_mfn != INVALID_MFN )
+ if ( v->vcpu_info_mfn != mfn_x(INVALID_MFN) )
return -EINVAL;
/* Run this command on yourself or on other offline VCPUS. */
{
unsigned long mfn;
- if ( v->vcpu_info_mfn == INVALID_MFN )
+ if ( v->vcpu_info_mfn == mfn_x(INVALID_MFN) )
return;
mfn = v->vcpu_info_mfn;
(readonly) ? P2M_ALLOC : P2M_UNSHARE);
if ( !(*page) )
{
- *frame = INVALID_MFN;
+ *frame = mfn_x(INVALID_MFN);
if ( p2m_is_shared(p2mt) )
return GNTST_eagain;
if ( p2m_is_paging(p2mt) )
*page = mfn_valid(*frame) ? mfn_to_page(*frame) : NULL;
if ( (!(*page)) || (!get_page(*page, rd)) )
{
- *frame = INVALID_MFN;
+ *frame = mfn_x(INVALID_MFN);
*page = NULL;
rc = GNTST_bad_page;
}
p2m_type_t __p2mt;
mfn = mfn_x(get_gfn_unshare(d, gop.mfn, &__p2mt));
if ( p2m_is_shared(__p2mt) || !p2m_is_valid(__p2mt) )
- mfn = INVALID_MFN;
+ mfn = mfn_x(INVALID_MFN);
}
#else
mfn = mfn_x(gfn_to_mfn(d, _gfn(gop.mfn)));
TYPE_SAFE(unsigned long, mfn);
#define PRI_mfn "05lx"
-#define INVALID_MFN (~0UL)
+#define INVALID_MFN _mfn(~0UL)
#ifndef mfn_t
#define mfn_t /* Grep fodder: mfn_t, _mfn() and mfn_x() are defined above */