{
/* All PoD: Mark the whole region invalid and tell caller
* we're done. */
- set_p2m_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid, p2m->default_access);
+ p2m_set_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid,
+ p2m->default_access);
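+ /* The invalidated range held (1 << order) outstanding PoD entries;
+ * drop them from the count, which must never go negative. */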
p2m->pod.entry_count-=(1<<order);
BUG_ON(p2m->pod.entry_count < 0);
ret = 1;
mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL);
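+ /* Only populate-on-demand entries are tracked in pod.entry_count;
+ * anything already backed by a real page is handled separately. */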
if ( t == p2m_populate_on_demand )
{
- set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
+ p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid,
+ p2m->default_access);
p2m->pod.entry_count--;
BUG_ON(p2m->pod.entry_count < 0);
pod--;
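+ /* A page that was already populated: unmap it and recycle the frame
+ * into the PoD cache so it can back a later demand fault. */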
page = mfn_to_page(mfn);
- set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
+ p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid,
+ p2m->default_access);
set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
p2m_pod_cache_add(p2m, page, 0);
}
/* Try to remove the page, restoring old mapping if it fails. */
- set_p2m_entry(p2m, gfn, _mfn(0), PAGE_ORDER_2M,
+ p2m_set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_2M,
p2m_populate_on_demand, p2m->default_access);
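+ /* _mfn(0) is just a placeholder here: a PoD entry carries no backing
+ * frame, the p2m_populate_on_demand type alone marks the range. */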
- /* Make none of the MFNs are used elsewhere... for example, mapped
+ /* Make sure none of the MFNs are used elsewhere... for example, mapped
out_reset:
if ( reset )
- set_p2m_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
+ p2m_set_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
out:
gfn_unlock(p2m, gfn, SUPERPAGE_ORDER);
}
/* Try to remove the page, restoring old mapping if it fails. */
- set_p2m_entry(p2m, gfns[i], _mfn(0), PAGE_ORDER_4K,
+ p2m_set_entry(p2m, gfns[i], _mfn(0), PAGE_ORDER_4K,
p2m_populate_on_demand, p2m->default_access);
/* See if the page was successfully unmapped. (Allow one refcount
unmap_domain_page(map[i]);
map[i] = NULL;
- set_p2m_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+ p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
types[i], p2m->default_access);
continue;
* check timing. */
if ( j < PAGE_SIZE/sizeof(*map[i]) )
{
- set_p2m_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+ p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
types[i], p2m->default_access);
}
else
{
pod_unlock(p2m);
gfn_aligned = (gfn >> order) << order;
- /* Note that we are supposed to call set_p2m_entry() 512 times to
- * split 1GB into 512 2MB pages here. But We only do once here because
- * set_p2m_entry() should automatically shatter the 1GB page into
- * 512 2MB pages. The rest of 511 calls are unnecessary.
+ /* Note that we are supposed to call p2m_set_entry() 512 times to
+ * split 1GB into 512 2MB pages here. But we only do it once here,
+ * because p2m_set_entry() should automatically shatter the 1GB page
+ * into 512 2MB pages; the remaining 511 calls are unnecessary.
*
* NOTE: In a fine-grained p2m locking scenario this operation
* may need to promote its locking from gfn->1g superpage
*/
- set_p2m_entry(p2m, gfn_aligned, _mfn(0), PAGE_ORDER_2M,
+ p2m_set_entry(p2m, gfn_aligned, _mfn(0), PAGE_ORDER_2M,
p2m_populate_on_demand, p2m->default_access);
return 0;
}
gfn_aligned = (gfn >> order) << order;
- set_p2m_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw, p2m->default_access);
+ p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
+ p2m->default_access);
for( i = 0; i < (1UL << order); i++ )
{
* need promoting the gfn lock from gfn->2M superpage */
gfn_aligned = (gfn>>order)<<order;
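+ /* Unlike the 1GB case above, remap the 2MB range as individual
+ * 4KB PoD entries, one p2m_set_entry() call per page. */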
for(i=0; i<(1<<order); i++)
- set_p2m_entry(p2m, gfn_aligned+i, _mfn(0), PAGE_ORDER_4K,
+ p2m_set_entry(p2m, gfn_aligned+i, _mfn(0), PAGE_ORDER_4K,
p2m_populate_on_demand, p2m->default_access);
if ( tb_init_done )
{
}
/* Now, actually do the two-way mapping */
- if ( !set_p2m_entry(p2m, gfn, _mfn(0), order,
+ if ( !p2m_set_entry(p2m, gfn, _mfn(0), order,
p2m_populate_on_demand, p2m->default_access) )
rc = -EINVAL;
else
}
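+/* Install entries for the (gfn, mfn) range of the given page order.
+ * Returns nonzero on success; callers treat a zero return as -EINVAL. */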
-int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
+int p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
{
struct domain *d = p2m->domain;
/* Initialise physmap tables for slot zero. Other code assumes this. */
p2m->defer_nested_flush = 1;
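+ /* Defer nested-p2m flushes while slot zero is seeded; the flag is
+ * cleared again once the initial entry is in place. */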
- if ( !set_p2m_entry(p2m, 0, _mfn(INVALID_MFN), PAGE_ORDER_4K,
+ if ( !p2m_set_entry(p2m, 0, _mfn(INVALID_MFN), PAGE_ORDER_4K,
p2m_invalid, p2m->default_access) )
goto error;
p2m->defer_nested_flush = 0;
ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
}
}
- set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid, p2m->default_access);
+ p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid,
+ p2m->default_access);
}
void
/* Now, actually do the two-way mapping */
if ( mfn_valid(_mfn(mfn)) )
{
- if ( !set_p2m_entry(p2m, gfn, _mfn(mfn), page_order, t, p2m->default_access) )
+ if ( !p2m_set_entry(p2m, gfn, _mfn(mfn), page_order, t,
+ p2m->default_access) )
{
rc = -EINVAL;
goto out; /* Failed to update p2m, bail without updating m2p. */
{
gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
gfn, mfn);
- if ( !set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order,
+ if ( !p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), page_order,
p2m_invalid, p2m->default_access) )
rc = -EINVAL;
else
mfn = p2m->get_entry(p2m, gfn, &pt, &a, 0, NULL);
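+ /* Re-read the entry under the gfn lock and only change the type if it
+ * still matches the expected old type. */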
if ( pt == ot )
- set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, p2m->default_access);
+ p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, p2m->default_access);
gfn_unlock(p2m, gfn, 0);
order = PAGE_ORDER_4K;
}
if ( pt == ot )
- set_p2m_entry(p2m, gfn, mfn, order, nt, a);
+ p2m_set_entry(p2m, gfn, mfn, order, nt, a);
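+ /* Advance to the next order-aligned gfn; masking after the add keeps
+ * the walk aligned even if this entry was not. Wrapping to zero means
+ * the whole address space has been covered. */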
gfn += 1UL << order;
gfn &= -1UL << order;
if ( !gfn )
}
P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn));
- rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_mmio_direct, p2m->default_access);
+ rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_mmio_direct,
+ p2m->default_access);
gfn_unlock(p2m, gfn, 0);
if ( 0 == rc )
gdprintk(XENLOG_ERR,
- "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
+ "set_mmio_p2m_entry: p2m_set_entry failed! mfn=%08lx\n",
mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)));
return rc;
}
"clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn);
goto out;
}
- rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_invalid, p2m->default_access);
+ rc = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_invalid,
+ p2m->default_access);
out:
gfn_unlock(p2m, gfn, 0);
set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn));
- rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_shared, p2m->default_access);
+ rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_shared,
+ p2m->default_access);
gfn_unlock(p2m, gfn, 0);
if ( 0 == rc )
gdprintk(XENLOG_ERR,
- "set_shared_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
+ "set_shared_p2m_entry: p2m_set_entry failed! mfn=%08lx\n",
mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)));
return rc;
}
goto out;
/* Fix p2m entry */
- set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_out, a);
+ p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_out, a);
ret = 0;
out:
put_page(page);
/* Remove mapping from p2m table */
- set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_ram_paged, a);
+ p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_ram_paged, a);
/* Clear content before returning the page to Xen */
scrub_one_page(page);
if ( p2mt == p2m_ram_paging_out )
req.flags |= MEM_EVENT_FLAG_EVICT_FAIL;
- set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
+ p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
}
gfn_unlock(p2m, gfn, 0);
- /* Make the page already guest-accessible. If the pager still has a
+ /* Make the page guest-accessible right away. If the pager still has a
* pending resume operation, it will be idempotent p2m entry-wise,
* but will unpause the vcpu */
- set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
- paging_mode_log_dirty(d) ? p2m_ram_logdirty :
- p2m_ram_rw, a);
+ p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
+ paging_mode_log_dirty(d) ? p2m_ram_logdirty :
+ p2m_ram_rw, a);
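+ /* Bring the M2P entry back in sync with the restored P2M mapping. */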
set_gpfn_from_mfn(mfn_x(mfn), gfn);
if ( !page_extant )
* were nominated but not evicted */
if ( mfn_valid(mfn) && (p2mt == p2m_ram_paging_in) )
{
- set_p2m_entry(p2m, rsp.gfn, mfn, PAGE_ORDER_4K,
- paging_mode_log_dirty(d) ? p2m_ram_logdirty :
- p2m_ram_rw, a);
+ p2m_set_entry(p2m, rsp.gfn, mfn, PAGE_ORDER_4K,
+ paging_mode_log_dirty(d) ? p2m_ram_logdirty :
+ p2m_ram_rw, a);
set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
}
gfn_unlock(p2m, rsp.gfn, 0);