enum p2m_operation {
INSERT,
REMOVE,
- RELINQUISH,
+ /*
+ * RELINQUISH is gone: domain teardown no longer goes through
+ * apply_p2m_changes() — see relinquish_p2m_mapping(), which now walks
+ * the p2m directly with p2m_get_entry()/__p2m_set_entry().
+ */
MEMACCESS,
};
break;
- case RELINQUISH:
case REMOVE:
if ( !p2m_valid(orig_pte) )
{
{
switch ( op )
{
- case RELINQUISH:
- /*
- * Arbitrarily, preempt every 512 operations or 8192 nops.
- * 512*P2M_ONE_PROGRESS == 8192*P2M_ONE_PROGRESS_NOP == 0x2000
- * This is set in preempt_count_limit.
- *
- */
- p2m->lowest_mapped_gfn = _gfn(addr >> PAGE_SHIFT);
- rc = -ERESTART;
- goto out;
-
case MEMACCESS:
{
/*
return rc;
}
+/*
+ * Walk the p2m of @d and remove every mapping, dropping the page
+ * references that were taken when the mappings were inserted.
+ *
+ * Returns 0 when the whole range has been processed, -ERESTART when
+ * preempted (the caller should invoke this function again to resume —
+ * progress is recorded in p2m->lowest_mapped_gfn), or a negative error
+ * propagated from __p2m_set_entry().
+ *
+ * XXX: See whether the mapping can be left intact in the p2m.
+ */
int relinquish_p2m_mapping(struct domain *d)
{
struct p2m_domain *p2m = &d->arch.p2m;
- unsigned long nr;
+ unsigned long count = 0;
+ p2m_type_t t;
+ int rc = 0;
+ unsigned int order;
+
+ /* Convenience alias */
+ gfn_t start = p2m->lowest_mapped_gfn;
+ gfn_t end = p2m->max_mapped_gfn;
+
+ p2m_write_lock(p2m);
+
+ /*
+ * p2m_get_entry() fills in @order for the entry (or, presumably, the
+ * hole) at @start, so each iteration advances by a whole entry/hole via
+ * gfn_next_boundary() rather than page by page — NOTE(review): confirm
+ * @order is also valid for unmapped GFNs.
+ */
+ for ( ; gfn_x(start) < gfn_x(end);
+ start = gfn_next_boundary(start, order) )
+ {
+ mfn_t mfn = p2m_get_entry(p2m, start, &t, NULL, &order);
+
+ count++;
+ /*
+ * Preempt every 512 iterations (the 512 is arbitrary) so the p2m
+ * write lock is not held for too long. The current entry has not
+ * been removed yet, and lowest_mapped_gfn is updated below, so the
+ * next call resumes exactly here.
+ */
+ if ( !(count % 512) && hypercall_preempt_check() )
+ {
+ rc = -ERESTART;
+ break;
+ }
- nr = gfn_x(p2m->max_mapped_gfn) - gfn_x(p2m->lowest_mapped_gfn);
+ /*
+ * p2m_set_entry will take care of removing reference on page
+ * when it is necessary and removing the mapping in the p2m.
+ */
+ if ( !mfn_eq(mfn, INVALID_MFN) )
+ {
+ /*
+ * For valid mapping, the start will always be aligned as
+ * entry will be removed whilst relinquishing.
+ */
+ rc = __p2m_set_entry(p2m, start, order, INVALID_MFN,
+ p2m_invalid, p2m_access_rwx);
+ if ( unlikely(rc) )
+ {
+ printk(XENLOG_G_ERR "Unable to remove mapping gfn=%#"PRI_gfn" order=%u from the p2m of domain %d\n", gfn_x(start), order, d->domain_id);
+ break;
+ }
+ }
+ }
- return apply_p2m_changes(d, RELINQUISH, p2m->lowest_mapped_gfn, nr,
- INVALID_MFN, 0, p2m_invalid,
- d->arch.p2m.default_access);
+ /*
+ * Update lowest_mapped_gfn so on the next call we still start where
+ * we stopped.
+ */
+ p2m->lowest_mapped_gfn = start;
+
+ p2m_write_unlock(p2m);
+
+ return rc;
}
int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr)