return rc;
}
-#ifdef CONFIG_HVM
/*
* Mark (via clearing the U flag) as needing P2M type re-calculation all valid
* present entries at the targeted level for the passed in GFN range, which is
return err;
}
-#endif /* CONFIG_HVM */
/*
* Handle possibly necessary P2M type re-calculation (U flag clear for a
return (p2m_is_valid(*t) || p2m_is_any_ram(*t)) ? mfn : INVALID_MFN;
}
-#ifdef CONFIG_HVM
-
static void p2m_pt_change_entry_type_global(struct p2m_domain *p2m,
p2m_type_t ot, p2m_type_t nt)
{
return err;
}
-#endif /* CONFIG_HVM */
-
#if P2M_AUDIT
static long p2m_pt_audit_p2m(struct p2m_domain *p2m)
{
{
p2m->set_entry = p2m_pt_set_entry;
p2m->get_entry = p2m_pt_get_entry;
-#ifdef CONFIG_HVM
p2m->recalc = do_recalc;
p2m->change_entry_type_global = p2m_pt_change_entry_type_global;
p2m->change_entry_type_range = p2m_pt_change_entry_type_range;
-#endif
/* Still too early to use paging_mode_hap(). */
if ( hap_enabled(p2m->domain) )
p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
unsigned int *page_order, bool_t locked)
{
+#ifdef CONFIG_HVM
mfn_t mfn;
gfn_t gfn = _gfn(gfn_l);
+#endif
/* Unshare makes no sense without populate. */
if ( q & P2M_UNSHARE )
return _mfn(gfn_l);
}
+#ifdef CONFIG_HVM
if ( locked )
/* Grab the lock here, don't release until put_gfn */
gfn_lock(p2m, gfn, 0);
}
return mfn;
+#endif
}
void __put_gfn(struct p2m_domain *p2m, unsigned long gfn)
return page;
}
+#ifdef CONFIG_HVM
/* Returns: 0 for success, -errno for failure */
int p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
return rc;
}
+#endif
mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned int level)
{
p2m_teardown_hostp2m(d);
}
+#ifdef CONFIG_HVM
+
static int __must_check
p2m_remove_page(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
unsigned int page_order)
p2m_type_t t;
p2m_access_t a;
- /* IOMMU for PV guests is handled in get_page_type() and put_page(). */
- if ( !paging_mode_translate(p2m->domain) )
- return 0;
-
ASSERT(gfn_locked_by_me(p2m, gfn));
P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn_x(gfn), mfn_x(mfn));
struct p2m_domain *p2m = p2m_get_hostp2m(d);
int rc;
+ /* IOMMU for PV guests is handled in get_page_type() and put_page(). */
+ if ( !paging_mode_translate(d) )
+ return 0;
+
gfn_lock(p2m, gfn, page_order);
rc = p2m_remove_page(p2m, gfn, mfn, page_order);
gfn_unlock(p2m, gfn, page_order);
return rc;
}
+#endif /* CONFIG_HVM */
+
int
guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
unsigned int page_order)
int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
p2m_access_t p2ma, unsigned int flag)
{
+#ifdef CONFIG_HVM
p2m_type_t p2mt;
p2m_access_t a;
gfn_t gfn = _gfn(gfn_l);
mfn_t mfn;
struct p2m_domain *p2m = p2m_get_hostp2m(d);
int ret;
+#endif
- if ( !paging_mode_translate(p2m->domain) )
+ if ( !paging_mode_translate(d) )
{
if ( !is_iommu_enabled(d) )
return 0;
IOMMUF_readable | IOMMUF_writable);
}
+#ifdef CONFIG_HVM
gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
gfn_unlock(p2m, gfn, 0);
return ret;
+#endif
}
int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
{
+#ifdef CONFIG_HVM
p2m_type_t p2mt;
p2m_access_t a;
gfn_t gfn = _gfn(gfn_l);
mfn_t mfn;
struct p2m_domain *p2m = p2m_get_hostp2m(d);
int ret;
+#endif
if ( !paging_mode_translate(d) )
{
return iommu_legacy_unmap(d, _dfn(gfn_l), 1ul << PAGE_ORDER_4K);
}
+#ifdef CONFIG_HVM
gfn_lock(p2m, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
}
return ret;
+#endif
}
#ifdef CONFIG_MEM_SHARING
/* Pages used to construct the p2m */
struct page_list_head pages;
+#ifdef CONFIG_HVM
int (*set_entry)(struct p2m_domain *p2m,
gfn_t gfn,
mfn_t mfn, unsigned int page_order,
p2m_query_t q,
unsigned int *page_order,
bool_t *sve);
-#ifdef CONFIG_HVM
int (*recalc)(struct p2m_domain *p2m,
unsigned long gfn);
void (*enable_hardware_log_dirty)(struct p2m_domain *p2m);
unsigned int page_order, p2m_type_t p2mt,
p2m_access_t p2ma);
+#if defined(CONFIG_HVM)
/* Set up function pointers for PT implementation: only for use by p2m code */
extern void p2m_pt_init(struct p2m_domain *p2m);
+#elif defined(CONFIG_SHADOW_PAGING)
+# define p2m_pt_init shadow_p2m_init
+#else
+static inline void p2m_pt_init(struct p2m_domain *p2m) {}
+#endif
void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
p2m_query_t q, uint32_t *pfec);
unsigned long gfn, mfn_t mfn);
/* Remove a page from a domain's p2m table */
+#ifdef CONFIG_HVM
int __must_check
guest_physmap_remove_page(struct domain *d, gfn_t gfn, mfn_t mfn,
unsigned int page_order);
+#else
+static inline int
+guest_physmap_remove_page(struct domain *d, gfn_t gfn, mfn_t mfn,
+ unsigned int page_order)
+{
+ return 0;
+}
+#endif
/* Map MMIO regions in the p2m: start_gfn and nr describe the range in
 * the guest physical address space to map, starting from the machine