}
/*
+ * Fill an L4 with Xen entries.
+ *
* This function must write all ROOT_PAGETABLE_PV_XEN_SLOTS, to clobber any
* values a guest may have left there from alloc_l4_table().
+ *
+ * l4t and l4mfn are mandatory, but l4mfn doesn't need to be the mfn under
+ * *l4t. All other parameters are optional and will either fill or zero the
+ * appropriate slots. Pagetables not shared with guests will gain the
+ * extended directmap.
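+ *
+ * For example, per the call sites below, PV guest L4s and HAP/shadow monitor
+ * tables pass sl4mfn == INVALID_MFN, while shadow L4s pass the guest L4 as
+ * l4mfn and the shadow L4 itself as sl4mfn.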
*/
-void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
- bool zap_ro_mpt)
+void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
+ const struct domain *d, mfn_t sl4mfn, bool ro_mpt)
{
- /* Xen private mappings. */
- memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
- &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
- root_pgt_pv_xen_slots * sizeof(l4_pgentry_t));
+ /*
+ * PV vcpus need a shortened directmap. HVM and Idle vcpus get the full
+ * directmap.
+ */
+ bool short_directmap = d && !paging_mode_external(d);
+
+ /* Slot 256: RO M2P (if applicable). */
+ l4t[l4_table_offset(RO_MPT_VIRT_START)] =
+ ro_mpt ? idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)]
+ : l4e_empty();
+
+ /* Slot 257: PCI MMCFG. */
+ l4t[l4_table_offset(PCI_MCFG_VIRT_START)] =
+ idle_pg_table[l4_table_offset(PCI_MCFG_VIRT_START)];
+
+ /* Slot 258: Self linear mappings. */
+ ASSERT(!mfn_eq(l4mfn, INVALID_MFN));
+ l4t[l4_table_offset(LINEAR_PT_VIRT_START)] =
+ l4e_from_mfn(l4mfn, __PAGE_HYPERVISOR_RW);
+
+    /* Slot 259: Shadow linear mappings (if applicable). */
+ l4t[l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
+ mfn_eq(sl4mfn, INVALID_MFN) ? l4e_empty() :
+ l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR_RW);
+
+ /* Slot 260: Per-domain mappings (if applicable). */
+ l4t[l4_table_offset(PERDOMAIN_VIRT_START)] =
+ d ? l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW)
+ : l4e_empty();
+
+ /* Slot 261-: text/data/bss, RW M2P, vmap, frametable, directmap. */
#ifndef NDEBUG
- if ( unlikely(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS) )
+ if ( short_directmap &&
+ unlikely(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS) )
{
- l4_pgentry_t *next = &l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT +
- root_pgt_pv_xen_slots];
+ /*
+ * If using highmem-start=, artificially shorten the directmap to
+ * simulate very large machines.
+ */
+ l4_pgentry_t *next;
+
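+        /*
+         * Copy the Xen slots from XEN_VIRT_START up to the artificially
+         * shortened directmap boundary; the remaining slots are zeroed
+         * below, bar the optional split_l4e.
+         */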
+ memcpy(&l4t[l4_table_offset(XEN_VIRT_START)],
+ &idle_pg_table[l4_table_offset(XEN_VIRT_START)],
+ (ROOT_PAGETABLE_FIRST_XEN_SLOT + root_pgt_pv_xen_slots -
+ l4_table_offset(XEN_VIRT_START)) * sizeof(*l4t));
+
+ next = &l4t[ROOT_PAGETABLE_FIRST_XEN_SLOT + root_pgt_pv_xen_slots];
if ( l4e_get_intpte(split_l4e) )
*next++ = split_l4e;
memset(next, 0,
- _p(&l4tab[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next));
+ _p(&l4t[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next));
}
-#else
- BUILD_BUG_ON(root_pgt_pv_xen_slots != ROOT_PAGETABLE_PV_XEN_SLOTS);
+ else
#endif
- l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
- l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR_RW);
- l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
- l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
- if ( zap_ro_mpt || is_pv_32bit_domain(d) )
- l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
+ {
+ unsigned int slots = (short_directmap
+ ? ROOT_PAGETABLE_PV_XEN_SLOTS
+ : ROOT_PAGETABLE_XEN_SLOTS);
+
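+        /*
+         * Copy the Xen slots from XEN_VIRT_START onwards: the shortened set
+         * for PV guests, or everything including the extended directmap for
+         * HVM and idle pagetables.
+         */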
+ memcpy(&l4t[l4_table_offset(XEN_VIRT_START)],
+ &idle_pg_table[l4_table_offset(XEN_VIRT_START)],
+ (ROOT_PAGETABLE_FIRST_XEN_SLOT + slots -
+ l4_table_offset(XEN_VIRT_START)) * sizeof(*l4t));
+ }
}
bool fill_ro_mpt(mfn_t mfn)
if ( rc >= 0 )
{
- init_guest_l4_table(pl4e, d, !VM_ASSIST(d, m2p_strict));
+ init_xen_l4_slots(pl4e, _mfn(pfn),
+ d, INVALID_MFN, VM_ASSIST(d, m2p_strict));
atomic_inc(&d->arch.pv_domain.nr_l4_pages);
rc = 0;
}
return 0;
}
-static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
-{
- struct domain *d = v->domain;
- l4_pgentry_t *l4e;
-
- l4e = map_domain_page(l4mfn);
-
- /* Copy the common Xen mappings from the idle domain */
- memcpy(&l4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
- &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
- ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
-
- /* Install the per-domain mappings for this domain */
- l4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
- l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
-
- /* Install a linear mapping */
- l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
- l4e_from_mfn(l4mfn, __PAGE_HYPERVISOR_RW);
-
- unmap_domain_page(l4e);
-}
-
static mfn_t hap_make_monitor_table(struct vcpu *v)
{
struct domain *d = v->domain;
struct page_info *pg;
+ l4_pgentry_t *l4e;
mfn_t m4mfn;
ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
if ( (pg = hap_alloc(d)) == NULL )
goto oom;
+
m4mfn = page_to_mfn(pg);
- hap_install_xen_entries_in_l4(v, m4mfn);
+ l4e = map_domain_page(m4mfn);
+
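+    /*
+     * Install the self-linear mapping only: HVM monitor tables get neither
+     * a shadow-linear slot nor the RO M2P.
+     */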
+ init_xen_l4_slots(l4e, m4mfn, d, INVALID_MFN, false);
+ unmap_domain_page(l4e);
+
return m4mfn;
oom:
#endif
-
-/**************************************************************************/
-/* Functions to install Xen mappings and linear mappings in shadow pages */
-
-// XXX -- this function should probably be moved to shadow-common.c, but that
-// probably wants to wait until the shadow types have been moved from
-// shadow-types.h to shadow-private.h
-//
-#if GUEST_PAGING_LEVELS == 4
-void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
-{
- shadow_l4e_t *sl4e;
- unsigned int slots;
-
- sl4e = map_domain_page(sl4mfn);
- BUILD_BUG_ON(sizeof (l4_pgentry_t) != sizeof (shadow_l4e_t));
-
- /* Copy the common Xen mappings from the idle domain */
- slots = (shadow_mode_external(d)
- ? ROOT_PAGETABLE_XEN_SLOTS
- : ROOT_PAGETABLE_PV_XEN_SLOTS);
- memcpy(&sl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
- &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
- slots * sizeof(l4_pgentry_t));
-
- /* Install the per-domain mappings for this domain */
- sl4e[shadow_l4_table_offset(PERDOMAIN_VIRT_START)] =
- shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
- __PAGE_HYPERVISOR_RW);
-
- if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) &&
- !VM_ASSIST(d, m2p_strict) )
- {
- /* open coded zap_ro_mpt(mfn_x(sl4mfn)): */
- sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] = shadow_l4e_empty();
- }
-
- /*
- * Linear mapping slots:
- *
- * Calling this function with gl4mfn == sl4mfn is used to construct a
- * monitor table for translated domains. In this case, gl4mfn forms the
- * self-linear mapping (i.e. not pointing into the translated domain), and
- * the shadow-linear slot is skipped. The shadow-linear slot is either
- * filled when constructing lower level monitor tables, or via
- * sh_update_cr3() for 4-level guests.
- *
- * Calling this function with gl4mfn != sl4mfn is used for non-translated
- * guests, where the shadow-linear slot is actually self-linear, and the
- * guest-linear slot points into the guests view of its pagetables.
- */
- if ( shadow_mode_translate(d) )
- {
- ASSERT(mfn_eq(gl4mfn, sl4mfn));
-
- sl4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
- shadow_l4e_empty();
- }
- else
- {
- ASSERT(!mfn_eq(gl4mfn, sl4mfn));
-
- sl4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
- shadow_l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR_RW);
- }
-
- sl4e[shadow_l4_table_offset(LINEAR_PT_VIRT_START)] =
- shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR_RW);
-
- unmap_domain_page(sl4e);
-}
-#endif
-
-
/**************************************************************************/
/* Create a shadow of a given guest page.
*/
{
#if GUEST_PAGING_LEVELS == 4
case SH_type_l4_shadow:
- sh_install_xen_entries_in_l4(v->domain, gmfn, smfn);
- break;
+ {
+ shadow_l4e_t *l4t = map_domain_page(smfn);
+
+ BUILD_BUG_ON(sizeof(l4_pgentry_t) != sizeof(shadow_l4e_t));
+
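+        /*
+         * The guest-linear slot points at the guest's L4 (gmfn), while the
+         * shadow-linear slot is self-linear, pointing at this shadow (smfn).
+         */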
+ init_xen_l4_slots(l4t, gmfn, d, smfn, (!is_pv_32bit_domain(d) &&
+ VM_ASSIST(d, m2p_strict)));
+ unmap_domain_page(l4t);
+ }
+ break;
#endif
#if GUEST_PAGING_LEVELS >= 3
case SH_type_l2h_shadow:
{
mfn_t m4mfn;
+ l4_pgentry_t *l4e;
+
m4mfn = shadow_alloc(d, SH_type_monitor_table, 0);
- sh_install_xen_entries_in_l4(d, m4mfn, m4mfn);
- /* Remember the level of this table */
mfn_to_page(m4mfn)->shadow_flags = 4;
+
+ l4e = map_domain_page(m4mfn);
+
+ /*
+ * Create a self-linear mapping, but no shadow-linear mapping. A
+ * shadow-linear mapping will either be inserted below when creating
+ * lower level monitor tables, or later in sh_update_cr3().
+ */
+ init_xen_l4_slots(l4e, m4mfn, d, INVALID_MFN, false);
+
#if SHADOW_PAGING_LEVELS < 4
{
mfn_t m3mfn, m2mfn;
- l4_pgentry_t *l4e;
l3_pgentry_t *l3e;
/* Install an l3 table and an l2 table that will hold the shadow
-         * linear map entries. This overrides the linear map entry that
-         * was installed by sh_install_xen_entries_in_l4. */
+         * linear map entries. This overrides the linear map entry that
+         * was installed by init_xen_l4_slots(). */
- l4e = map_domain_page(m4mfn);
m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m3mfn)->shadow_flags = 3;
unmap_domain_page(l3e);
}
- unmap_domain_page(l4e);
}
#endif /* SHADOW_PAGING_LEVELS < 4 */
+
+ unmap_domain_page(l4e);
+
return m4mfn;
}
}
l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
}
clear_page(l4tab);
- init_guest_l4_table(l4tab, d, 0);
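+    /* ro_mpt == true keeps the RO M2P slot filled, as zap_ro_mpt == 0 did. */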
+ init_xen_l4_slots(l4tab, _mfn(virt_to_mfn(l4start)),
+ d, INVALID_MFN, true);
v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
if ( is_pv_32bit_domain(d) )
v->arch.guest_table_user = v->arch.guest_table;
{
struct page_info *pg;
l4_pgentry_t *l4tab;
+ mfn_t mfn;
pg = alloc_domheap_page(v->domain, MEMF_no_owner);
if ( pg == NULL )
return -ENOMEM;
- l4tab = __map_domain_page(pg);
+ mfn = page_to_mfn(pg);
+ l4tab = map_domain_page(mfn);
clear_page(l4tab);
- init_guest_l4_table(l4tab, v->domain, 1);
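+    /* ro_mpt == false leaves the RO M2P slot empty, as zap_ro_mpt == 1 did. */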
+ init_xen_l4_slots(l4tab, mfn, v->domain, INVALID_MFN, false);
unmap_domain_page(l4tab);
/* This page needs to look like a pagetable so that it can be shadowed */
int preemptible);
void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d);
-void init_guest_l4_table(l4_pgentry_t[], const struct domain *,
- bool_t zap_ro_mpt);
+void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
+ const struct domain *d, mfn_t sl4mfn, bool ro_mpt);
bool fill_ro_mpt(mfn_t mfn);
void zap_ro_mpt(mfn_t mfn);