if ( pagetable_is_null(v->arch.hvm.monitor_table) )
{
- mfn_t mmfn = v->arch.paging.mode->shadow.make_monitor_table(v);
+ mfn_t mmfn = sh_make_monitor_table(
+ v, v->arch.paging.mode->shadow.shadow_levels);
+
v->arch.hvm.monitor_table = pagetable_from_mfn(mmfn);
make_cr3(v, mmfn);
hvm_update_host_cr3(v);
old_mfn = pagetable_get_mfn(v->arch.hvm.monitor_table);
v->arch.hvm.monitor_table = pagetable_null();
- new_mfn = v->arch.paging.mode->shadow.make_monitor_table(v);
+ new_mfn = sh_make_monitor_table(
+ v, v->arch.paging.mode->shadow.shadow_levels);
v->arch.hvm.monitor_table = pagetable_from_mfn(new_mfn);
SHADOW_PRINTK("new monitor table %"PRI_mfn "\n",
mfn_x(new_mfn));
if ( v == current )
write_ptbase(v);
hvm_update_host_cr3(v);
- old_mode->shadow.destroy_monitor_table(v, old_mfn);
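+                /* Tear the old table down with the levels it was built for. */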
+ sh_destroy_monitor_table(v, old_mfn,
+ old_mode->shadow.shadow_levels);
}
}
mfn_t mfn = pagetable_get_mfn(v->arch.hvm.monitor_table);
if ( mfn_valid(mfn) && (mfn_x(mfn) != 0) )
- v->arch.paging.mode->shadow.destroy_monitor_table(v, mfn);
+ sh_destroy_monitor_table(
+ v, mfn,
+ v->arch.paging.mode->shadow.shadow_levels);
v->arch.hvm.monitor_table = pagetable_null();
}
#endif /* CONFIG_HVM */
atomic_inc(&v->domain->arch.paging.shadow.gtable_dirty_version);
}
+mfn_t sh_make_monitor_table(const struct vcpu *v, unsigned int shadow_levels)
+{
+ struct domain *d = v->domain;
+ mfn_t m4mfn;
+ l4_pgentry_t *l4e;
+
+ ASSERT(!pagetable_get_pfn(v->arch.hvm.monitor_table));
+
+ /* Guarantee we can get the memory we need */
+ shadow_prealloc(d, SH_type_monitor_table, CONFIG_PAGING_LEVELS);
+ m4mfn = shadow_alloc(d, SH_type_monitor_table, 0);
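+    /* Monitor pages record their table level in shadow_flags (4/3/2). */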
+ mfn_to_page(m4mfn)->shadow_flags = 4;
+
+ l4e = map_domain_page(m4mfn);
+
+ /*
+ * Create a self-linear mapping, but no shadow-linear mapping. A
+ * shadow-linear mapping will either be inserted below when creating
+ * lower level monitor tables, or later in sh_update_cr3().
+ */
+ init_xen_l4_slots(l4e, m4mfn, d, INVALID_MFN, false);
+
+ if ( shadow_levels < 4 )
+ {
+ mfn_t m3mfn, m2mfn;
+ l3_pgentry_t *l3e;
+
+ /*
+ * Install an l3 table and an l2 table that will hold the shadow
+ * linear map entries. This overrides the empty entry that was
+ * installed by init_xen_l4_slots().
+ */
+ m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
+ mfn_to_page(m3mfn)->shadow_flags = 3;
+ l4e[l4_table_offset(SH_LINEAR_PT_VIRT_START)]
+ = l4e_from_mfn(m3mfn, __PAGE_HYPERVISOR_RW);
+
+ m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
+ mfn_to_page(m2mfn)->shadow_flags = 2;
+ l3e = map_domain_page(m3mfn);
+ l3e[0] = l3e_from_mfn(m2mfn, __PAGE_HYPERVISOR_RW);
+ unmap_domain_page(l3e);
+ }
+
+ unmap_domain_page(l4e);
+
+ return m4mfn;
+}
+
+void sh_destroy_monitor_table(const struct vcpu *v, mfn_t mmfn,
+ unsigned int shadow_levels)
+{
+ struct domain *d = v->domain;
+
+ ASSERT(mfn_to_page(mmfn)->u.sh.type == SH_type_monitor_table);
+
+ if ( shadow_levels < 4 )
+ {
+ mfn_t m3mfn;
+ l4_pgentry_t *l4e = map_domain_page(mmfn);
+ l3_pgentry_t *l3e;
+ unsigned int linear_slot = l4_table_offset(SH_LINEAR_PT_VIRT_START);
+
+ /*
+ * Need to destroy the l3 and l2 monitor pages used
+ * for the linear map.
+ */
+ ASSERT(l4e_get_flags(l4e[linear_slot]) & _PAGE_PRESENT);
+ m3mfn = l4e_get_mfn(l4e[linear_slot]);
+ l3e = map_domain_page(m3mfn);
+ ASSERT(l3e_get_flags(l3e[0]) & _PAGE_PRESENT);
+ shadow_free(d, l3e_get_mfn(l3e[0]));
+ unmap_domain_page(l3e);
+ shadow_free(d, m3mfn);
+
+ unmap_domain_page(l4e);
+ }
+
+ /* Put the memory back in the pool */
+ shadow_free(d, mmfn);
+}
+
/**************************************************************************/
/* VRAM dirty tracking support */
int shadow_track_dirty_vram(struct domain *d,
}
-#if SHADOW_PAGING_LEVELS == GUEST_PAGING_LEVELS && defined(CONFIG_HVM)
-mfn_t
-sh_make_monitor_table(struct vcpu *v)
-{
- struct domain *d = v->domain;
-
- ASSERT(pagetable_get_pfn(v->arch.hvm.monitor_table) == 0);
-
- /* Guarantee we can get the memory we need */
- shadow_prealloc(d, SH_type_monitor_table, CONFIG_PAGING_LEVELS);
-
- {
- mfn_t m4mfn;
- l4_pgentry_t *l4e;
-
- m4mfn = shadow_alloc(d, SH_type_monitor_table, 0);
- mfn_to_page(m4mfn)->shadow_flags = 4;
-
- l4e = map_domain_page(m4mfn);
-
- /*
- * Create a self-linear mapping, but no shadow-linear mapping. A
- * shadow-linear mapping will either be inserted below when creating
- * lower level monitor tables, or later in sh_update_cr3().
- */
- init_xen_l4_slots(l4e, m4mfn, d, INVALID_MFN, false);
-
-#if SHADOW_PAGING_LEVELS < 4
- {
- mfn_t m3mfn, m2mfn;
- l3_pgentry_t *l3e;
- /* Install an l3 table and an l2 table that will hold the shadow
- * linear map entries. This overrides the linear map entry that
- * was installed by sh_install_xen_entries_in_l4. */
-
- m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
- mfn_to_page(m3mfn)->shadow_flags = 3;
- l4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)]
- = l4e_from_mfn(m3mfn, __PAGE_HYPERVISOR_RW);
-
- m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
- mfn_to_page(m2mfn)->shadow_flags = 2;
- l3e = map_domain_page(m3mfn);
- l3e[0] = l3e_from_mfn(m2mfn, __PAGE_HYPERVISOR_RW);
- unmap_domain_page(l3e);
-
- if ( is_pv_32bit_domain(d) )
- {
- l2_pgentry_t *l2t;
-
- /* For 32-bit PV guests, we need to map the 32-bit Xen
- * area into its usual VAs in the monitor tables */
- m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
- mfn_to_page(m3mfn)->shadow_flags = 3;
- l4e[0] = l4e_from_mfn(m3mfn, __PAGE_HYPERVISOR_RW);
-
- m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
- mfn_to_page(m2mfn)->shadow_flags = 2;
- l3e = map_domain_page(m3mfn);
- l3e[3] = l3e_from_mfn(m2mfn, _PAGE_PRESENT);
-
- l2t = map_domain_page(m2mfn);
- init_xen_pae_l2_slots(l2t, d);
- unmap_domain_page(l2t);
-
- unmap_domain_page(l3e);
- }
-
- }
-#endif /* SHADOW_PAGING_LEVELS < 4 */
-
- unmap_domain_page(l4e);
-
- return m4mfn;
- }
-}
-#endif /* SHADOW_PAGING_LEVELS == GUEST_PAGING_LEVELS */
-
/**************************************************************************/
/* These functions also take a virtual address and return the level-N
* shadow table mfn and entry, but they create the shadow pagetables if
shadow_free(d, smfn);
}
-#if SHADOW_PAGING_LEVELS == GUEST_PAGING_LEVELS && defined(CONFIG_HVM)
-void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
-{
- struct domain *d = v->domain;
- ASSERT(mfn_to_page(mmfn)->u.sh.type == SH_type_monitor_table);
-
-#if SHADOW_PAGING_LEVELS != 4
- {
- mfn_t m3mfn;
- l4_pgentry_t *l4e = map_domain_page(mmfn);
- l3_pgentry_t *l3e;
- int linear_slot = shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START);
-
- /* Need to destroy the l3 and l2 monitor pages used
- * for the linear map */
- ASSERT(l4e_get_flags(l4e[linear_slot]) & _PAGE_PRESENT);
- m3mfn = l4e_get_mfn(l4e[linear_slot]);
- l3e = map_domain_page(m3mfn);
- ASSERT(l3e_get_flags(l3e[0]) & _PAGE_PRESENT);
- shadow_free(d, l3e_get_mfn(l3e[0]));
- unmap_domain_page(l3e);
- shadow_free(d, m3mfn);
-
- if ( is_pv_32bit_domain(d) )
- {
- /* Need to destroy the l3 and l2 monitor pages that map the
- * Xen VAs at 3GB-4GB */
- ASSERT(l4e_get_flags(l4e[0]) & _PAGE_PRESENT);
- m3mfn = l4e_get_mfn(l4e[0]);
- l3e = map_domain_page(m3mfn);
- ASSERT(l3e_get_flags(l3e[3]) & _PAGE_PRESENT);
- shadow_free(d, l3e_get_mfn(l3e[3]));
- unmap_domain_page(l3e);
- shadow_free(d, m3mfn);
- }
- unmap_domain_page(l4e);
- }
-#endif
-
- /* Put the memory back in the pool */
- shadow_free(d, mmfn);
-}
-#endif
-
/**************************************************************************/
/* Functions to destroy non-Xen mappings in a pagetable hierarchy.
* These are called from common code when we are running out of shadow
.shadow.cmpxchg_guest_entry = sh_cmpxchg_guest_entry,
#endif
#ifdef CONFIG_HVM
- .shadow.make_monitor_table = sh_make_monitor_table,
- .shadow.destroy_monitor_table = sh_destroy_monitor_table,
#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
.shadow.guess_wrmap = sh_guess_wrmap,
#endif
mfn_t gmfn,
uint32_t shadow_type));
-/* Install the xen mappings in various flavours of shadow */
-void sh_install_xen_entries_in_l4(struct domain *, mfn_t gl4mfn, mfn_t sl4mfn);
-
/* Update the shadows in response to a pagetable write from Xen */
int sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size);
* With user_only == 1, unhooks only the user-mode mappings. */
void shadow_unhook_mappings(struct domain *d, mfn_t smfn, int user_only);
+/*
+ * sh_{make,destroy}_monitor_table() depend only on the number of shadow
+ * levels.
+ */
+mfn_t sh_make_monitor_table(const struct vcpu *v, unsigned int shadow_levels);
+void sh_destroy_monitor_table(const struct vcpu *v, mfn_t mmfn,
+ unsigned int shadow_levels);
+
/* VRAM dirty tracking helpers. */
void shadow_vram_get_mfn(mfn_t mfn, unsigned int l1f,
mfn_t sl1mfn, const void *sl1e,
#define sh_rm_write_access_from_sl1p INTERNAL_NAME(sh_rm_write_access_from_sl1p)
#endif
-/* sh_make_monitor_table depends only on the number of shadow levels */
-#define sh_make_monitor_table \
- SHADOW_SH_NAME(sh_make_monitor_table, SHADOW_PAGING_LEVELS)
-#define sh_destroy_monitor_table \
- SHADOW_SH_NAME(sh_destroy_monitor_table, SHADOW_PAGING_LEVELS)
-
-mfn_t sh_make_monitor_table(struct vcpu *v);
-void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn);
-
#if SHADOW_PAGING_LEVELS == 3
#define MFN_FITS_IN_HVM_CR3(_MFN) !(mfn_x(_MFN) >> 20)
#endif
mfn_t gmfn);
#endif
#ifdef CONFIG_HVM
- mfn_t (*make_monitor_table )(struct vcpu *v);
- void (*destroy_monitor_table )(struct vcpu *v, mfn_t mmfn);
int (*guess_wrmap )(struct vcpu *v,
unsigned long vaddr, mfn_t gmfn);
void (*pagetable_dying )(paddr_t gpa);