}
/* free ept sub tree behind an entry */
-void ept_free_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry, int level)
+static void ept_free_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry, int level)
{
/* End if the entry is a leaf entry. */
if ( level == 0 || !is_epte_present(ept_entry) ||
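The hunk's context cuts the leaf test short; for orientation, here is a minimal sketch of the shape such a recursive free usually takes (assuming Xen's map_domain_page()/unmap_domain_page() helpers, a 512-entry EPT_PAGETABLE_ENTRIES table, and an is_epte_superpage() predicate — illustrative only, not the patched source):

/* Sketch only: recursively free the sub-tree below an intermediate entry. */
static void ept_free_subtree_sketch(struct p2m_domain *p2m,
                                    ept_entry_t *entry, int level)
{
    int i;
    ept_entry_t *table;

    /* Base case: leaf level, nothing mapped, or a superpage leaf. */
    if ( level == 0 || !is_epte_present(entry) || is_epte_superpage(entry) )
        return;

    /* Recurse through the child table, then free the table page itself
     * via the p2m_free_ptp() this patch exports below. */
    table = map_domain_page(entry->mfn);
    for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
        ept_free_subtree_sketch(p2m, table + i, level - 1);
    unmap_domain_page(table);

    p2m_free_ptp(p2m, mfn_to_page(entry->mfn));
}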
#define SUPERPAGE_PAGES (1UL << 9)
#define superpage_aligned(_x) (((_x)&(SUPERPAGE_PAGES-1))==0)
-unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn)
+static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn)
{
unsigned long flags;
#ifdef __x86_64__
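As defined above, SUPERPAGE_PAGES is 1UL << 9 = 512, i.e. the number of 4KB pages covered by one 2MB superpage, and superpage_aligned() masks off the low nine bits to test for a multiple of 512. A self-contained illustration of the arithmetic (standalone, not part of the patch):

#include <assert.h>

#define SUPERPAGE_PAGES (1UL << 9)
#define superpage_aligned(_x) (((_x)&(SUPERPAGE_PAGES-1))==0)

int main(void)
{
    assert(superpage_aligned(0x200));   /* gfn 512: low 9 bits clear */
    assert(!superpage_aligned(0x201));  /* gfn 513: bit 0 set */
    assert(superpage_aligned(0));       /* zero is trivially aligned */
    return 0;
}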
// Find the next level's P2M entry, checking for out-of-range gfn's...
// Returns NULL on error.
//
-l1_pgentry_t *
+static l1_pgentry_t *
p2m_find_entry(void *table, unsigned long *gfn_remainder,
unsigned long gfn, uint32_t shift, uint32_t max)
{
return (l1_pgentry_t *)table + index;
}
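Only the function's tail is visible in the hunk; the body reduces to shift-and-mask indexing. A hedged sketch of that logic (names and the exact bounds handling are assumptions, not quoted from the patch):

/* Sketch: peel this level's index off the top of the remaining gfn bits,
 * range-check it against the table size, and leave the low bits for the
 * next level down. */
static l1_pgentry_t *find_entry_sketch(void *table,
                                       unsigned long *gfn_remainder,
                                       uint32_t shift, uint32_t max)
{
    unsigned long index = *gfn_remainder >> shift;

    if ( index >= max )                    /* gfn out of range: error */
        return NULL;
    *gfn_remainder &= (1UL << shift) - 1;  /* keep bits for lower levels */
    return (l1_pgentry_t *)table + index;
}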
-struct page_info *
-p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type)
-{
- struct page_info *pg;
-
- ASSERT(p2m);
- ASSERT(p2m->domain);
- ASSERT(p2m->domain->arch.paging.alloc_page);
- pg = p2m->domain->arch.paging.alloc_page(p2m->domain);
- if (pg == NULL)
- return NULL;
-
- page_list_add_tail(pg, &p2m->pages);
- pg->u.inuse.type_info = type | 1 | PGT_validated;
-
- return pg;
-}
-
-void
-p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg)
-{
- ASSERT(pg);
- ASSERT(p2m);
- ASSERT(p2m->domain);
- ASSERT(p2m->domain->arch.paging.free_page);
-
- page_list_del(pg, &p2m->pages);
- p2m->domain->arch.paging.free_page(p2m->domain, pg);
-
- return;
-}
-
/* Free intermediate tables from a p2m sub-tree */
-void
+static void
p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
{
/* End if the entry is a leaf entry. */
/* Walk the whole p2m table, changing any entries of the old type
* to the new type. This is used in hardware-assisted paging to
 * quickly enable or disable log-dirty tracking */
-void p2m_change_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt)
+static void p2m_change_type_global(struct p2m_domain *p2m,
+ p2m_type_t ot, p2m_type_t nt)
{
unsigned long mfn, gfn, flags;
l1_pgentry_t l1e_content;
return rc;
}
+struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type)
+{
+ struct page_info *pg;
+
+ ASSERT(p2m);
+ ASSERT(p2m->domain);
+ ASSERT(p2m->domain->arch.paging.alloc_page);
+ pg = p2m->domain->arch.paging.alloc_page(p2m->domain);
+ if (pg == NULL)
+ return NULL;
+
+ page_list_add_tail(pg, &p2m->pages);
+ pg->u.inuse.type_info = type | 1 | PGT_validated;
+
+ return pg;
+}
+
+void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg)
+{
+ ASSERT(pg);
+ ASSERT(p2m);
+ ASSERT(p2m->domain);
+ ASSERT(p2m->domain->arch.paging.free_page);
+
+ page_list_del(pg, &p2m->pages);
+ p2m->domain->arch.paging.free_page(p2m->domain, pg);
+
+ return;
+}
+
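With the pair moved here and exported, callers outside this file (e.g. the EPT code whose ept_free_entry() becomes static above) can allocate and release p2m pagetable pages through a single interface. A hypothetical caller, for illustration only:

/* Sketch: allocate an intermediate table page, and give it back on an
 * error path. The type argument 0 reflects a caller that needs no
 * particular PGT type -- an assumption, not code from the patch. */
static int add_level_sketch(struct p2m_domain *p2m)
{
    struct page_info *pg = p2m_alloc_ptp(p2m, 0);

    if ( pg == NULL )
        return -ENOMEM;          /* the per-domain alloc_page may fail */

    /* ... install pg in the table; if installation fails, undo it: */
    p2m_free_ptp(p2m, pg);
    return 0;
}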
// Allocate a new p2m table for a domain.
//
// The structure of the p2m table is that of a pagetable for xen (i.e. it is
/* Init the data structures for later use by the p2m code */
int p2m_init(struct domain *d);
-/* PTE flags for various types of p2m entry */
-unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn);
-
/* Allocate a new p2m table for a domain.
*
* Returns 0 for success or -errno. */
int p2m_alloc_table(struct p2m_domain *p2m);
-/* Find the next level's P2M entry, checking for out-of-range gfn's...
- * Returns NULL on error.
- */
-l1_pgentry_t *
-p2m_find_entry(void *table, unsigned long *gfn_remainder,
- unsigned long gfn, uint32_t shift, uint32_t max);
-
/* Return all the p2m resources to Xen. */
void p2m_teardown(struct p2m_domain *p2m);
void p2m_final_teardown(struct domain *d);
}
/* Change types across all p2m entries in a domain */
-void p2m_change_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt);
void p2m_change_entry_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt);
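p2m_change_entry_type_global() remains the public entry point; hardware-assisted paging uses it to retype every RAM mapping when toggling log-dirty tracking. A typical call, sketched with the standard p2m types:

/* Sketch: flip all writable RAM entries to the log-dirty type so that
 * subsequent guest writes fault and can be recorded. */
p2m_change_entry_type_global(p2m, p2m_ram_rw, p2m_ram_logdirty);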
/* Compare-exchange the type of a single p2m entry */