/* Dispatcher function: call the per-mode function that will unhook the
* non-Xen mappings in this top-level shadow mfn. With user_only == 1,
* unhooks only the user-mode mappings. */
-void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn, int user_only)
+void shadow_unhook_mappings(struct domain *d, mfn_t smfn, int user_only)
{
    struct page_info *sp = mfn_to_page(smfn);
    switch ( sp->u.sh.type )
    {
    case SH_type_l2_32_shadow:
-        SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, 2)(v, smfn, user_only);
+        SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, 2)(d, smfn, user_only);
        break;
    case SH_type_l2_pae_shadow:
    case SH_type_l2h_pae_shadow:
-        SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings, 3)(v, smfn, user_only);
+        SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings, 3)(d, smfn, user_only);
        break;
    case SH_type_l4_64_shadow:
-        SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, 4)(v, smfn, user_only);
+        SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, 4)(d, smfn, user_only);
        break;
    default:
        SHADOW_ERROR("top-level shadow has bad type %08x\n", sp->u.sh.type);
            if ( !pagetable_is_null(v2->arch.shadow_table[i]) )
            {
                TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_PREALLOC_UNHOOK);
-                shadow_unhook_mappings(v,
+                shadow_unhook_mappings(d,
                               pagetable_get_mfn(v2->arch.shadow_table[i]), 0);
                /* See if that freed up enough space */
    for_each_vcpu(d, v)
        for ( i = 0 ; i < 4 ; i++ )
            if ( !pagetable_is_null(v->arch.shadow_table[i]) )
-                shadow_unhook_mappings(v,
+                shadow_unhook_mappings(d,
                               pagetable_get_mfn(v->arch.shadow_table[i]), 0);
    /* Make sure everyone sees the unshadowings */
#if GUEST_PAGING_LEVELS == 2
-void sh_unhook_32b_mappings(struct vcpu *v, mfn_t sl2mfn, int user_only)
+void sh_unhook_32b_mappings(struct domain *d, mfn_t sl2mfn, int user_only)
{
-    struct domain *d = v->domain;
    shadow_l2e_t *sl2e;
    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, d, {
        if ( !user_only || (sl2e->l2 & _PAGE_USER) )
#elif GUEST_PAGING_LEVELS == 3
-void sh_unhook_pae_mappings(struct vcpu *v, mfn_t sl2mfn, int user_only)
+void sh_unhook_pae_mappings(struct domain *d, mfn_t sl2mfn, int user_only)
/* Walk a PAE l2 shadow, unhooking entries from all the subshadows */
{
-    struct domain *d = v->domain;
    shadow_l2e_t *sl2e;
    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, d, {
        if ( !user_only || (sl2e->l2 & _PAGE_USER) )
#elif GUEST_PAGING_LEVELS == 4
-void sh_unhook_64b_mappings(struct vcpu *v, mfn_t sl4mfn, int user_only)
+void sh_unhook_64b_mappings(struct domain *d, mfn_t sl4mfn, int user_only)
{
-    struct domain *d = v->domain;
    shadow_l4e_t *sl4e;
    SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, d, {
        if ( !user_only || (sl4e->l4 & _PAGE_USER) )
        {
            gmfn = _mfn(mfn_to_page(smfn)->v.sh.back);
            mfn_to_page(gmfn)->shadow_flags |= SHF_pagetable_dying;
-            shadow_unhook_mappings(v, smfn, 1/* user pages only */);
+            shadow_unhook_mappings(d, smfn, 1/* user pages only */);
            flush = 1;
        }
    }
    if ( mfn_valid(smfn) )
    {
        mfn_to_page(gmfn)->shadow_flags |= SHF_pagetable_dying;
-        shadow_unhook_mappings(v, smfn, 1/* user pages only */);
+        shadow_unhook_mappings(d, smfn, 1/* user pages only */);
        /* Now flush the TLB: we removed toplevel mappings. */
        flush_tlb_mask(d->domain_dirty_cpumask);
    }
extern void
SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, GUEST_LEVELS)
-    (struct vcpu *v, mfn_t sl2mfn, int user_only);
+    (struct domain *d, mfn_t sl2mfn, int user_only);
extern void
SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings, GUEST_LEVELS)
-    (struct vcpu *v, mfn_t sl3mfn, int user_only);
+    (struct domain *d, mfn_t sl3mfn, int user_only);
extern void
SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, GUEST_LEVELS)
-    (struct vcpu *v, mfn_t sl4mfn, int user_only);
+    (struct domain *d, mfn_t sl4mfn, int user_only);
extern int
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, GUEST_LEVELS)
/* Unhook the non-Xen mappings in this top-level shadow mfn.
* With user_only == 1, unhooks only the user-mode mappings. */
-void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn, int user_only);
+void shadow_unhook_mappings(struct domain *d, mfn_t smfn, int user_only);
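/* Illustrative usage sketch (not part of the patch; the caller, its local
 * variables and the smfn value here are hypothetical).  After this change a
 * caller that only holds a vcpu derives the domain itself, as the removed
 * per-level hunks above did with v->domain, and selects the scope of the
 * unhook via user_only:
 *
 *     struct domain *d = v->domain;
 *     shadow_unhook_mappings(d, smfn, 0);  // unhook all non-Xen mappings
 *     shadow_unhook_mappings(d, smfn, 1);  // unhook user-mode mappings only
 */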
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Allow a shadowed page to go out of sync */