}
}
+/*
+ * Atomically write a P2M entry and update the paging-assistance state
+ * appropriately.
+ * Arguments: the P2M of the domain in question, the GFN whose mapping is
+ * being updated, a pointer to the entry to be written, the new contents
+ * of the entry, and the level in the p2m tree at which we are writing.
+ */
+static int write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
+ l1_pgentry_t *p, l1_pgentry_t new,
+ unsigned int level)
+{
+ struct domain *d = p2m->domain;
+ const struct vcpu *v = current;
+ int rc = 0;
+
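+ /*
+ * If current is not a vCPU of the target domain (e.g. a control domain
+ * updating the guest's p2m), fall back to the target's vCPU 0 to
+ * determine the paging mode in use.
+ */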
+ if ( v->domain != d )
+ v = d->vcpu ? d->vcpu[0] : NULL;
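+ /*
+ * With paging assistance enabled, dispatch to the paging mode's hook so
+ * mode-specific bookkeeping and TLB flushing are taken care of;
+ * otherwise a plain atomic PTE write is sufficient.
+ */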
+ if ( likely(v && paging_mode_enabled(d) && paging_get_hostmode(v)) )
+ rc = paging_get_hostmode(v)->write_p2m_entry(p2m, gfn, p, new, level);
+ else
+ safe_write_pte(p, new);
+
+ return rc;
+}
// Find the next level's P2M entry, checking for out-of-range gfn's...
// Returns NULL on error.
entry_content.l1 = l3e_content.l3;
rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
- /* NB: paging_write_p2m_entry() handles tlb flushes properly */
+ /* NB: write_p2m_entry() handles tlb flushes properly */
if ( rc )
goto out;
}
/* level 1 entry */
rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
- /* NB: paging_write_p2m_entry() handles tlb flushes properly */
+ /* NB: write_p2m_entry() handles tlb flushes properly */
if ( rc )
goto out;
}
entry_content.l1 = l2e_content.l2;
rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
- /* NB: paging_write_p2m_entry() handles tlb flushes properly */
+ /* NB: write_p2m_entry() handles tlb flushes properly */
if ( rc )
goto out;
}
p2m->recalc = do_recalc;
p2m->change_entry_type_global = p2m_pt_change_entry_type_global;
p2m->change_entry_type_range = p2m_pt_change_entry_type_range;
- p2m->write_p2m_entry = paging_write_p2m_entry;
+ p2m->write_p2m_entry = write_p2m_entry;
#if P2M_AUDIT
p2m->audit_p2m = p2m_pt_audit_p2m;
#else
v->arch.paging.nestedmode = NULL;
hvm_asid_flush_vcpu(v);
}
-#endif
-
-int paging_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
- l1_pgentry_t *p, l1_pgentry_t new,
- unsigned int level)
-{
- struct domain *d = p2m->domain;
- struct vcpu *v = current;
- int rc = 0;
-
- if ( v->domain != d )
- v = d->vcpu ? d->vcpu[0] : NULL;
- if ( likely(v && paging_mode_enabled(d) && paging_get_hostmode(v) != NULL) )
- rc = paging_get_hostmode(v)->write_p2m_entry(p2m, gfn, p, new, level);
- else
- safe_write_pte(p, new);
- return rc;
-}
-
-#ifdef CONFIG_HVM
int __init paging_set_allocation(struct domain *d, unsigned int pages,
bool *preempted)
{
*p = new;
}
-/* Atomically write a P2M entry and update the paging-assistance state
- * appropriately.
- * Arguments: the domain in question, the GFN whose mapping is being updated,
- * a pointer to the entry to be written, the MFN in which the entry resides,
- * the new contents of the entry, and the level in the p2m tree at which
- * we are writing. */
-struct p2m_domain;
-
-int paging_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
- l1_pgentry_t *p, l1_pgentry_t new,
- unsigned int level);
-
/*
* Called from the guest to indicate that a process is being
* torn down and its pagetables will soon be discarded.