{
if ( (page->u.inuse.type_info & PGT_type_mask) ==
PGT_l4_page_table )
- done = !fill_ro_mpt(page_to_mfn(page));
+ done = !fill_ro_mpt(_mfn(page_to_mfn(page)));
page_unlock(page);
}
case 0:
if ( !compat && !VM_ASSIST(d, m2p_strict) &&
!paging_mode_refcounts(d) )
- fill_ro_mpt(cr3_gfn);
+ fill_ro_mpt(_mfn(cr3_gfn));
break;
default:
if ( cr3_page == current->arch.old_guest_table )
break;
case 0:
if ( VM_ASSIST(d, m2p_strict) )
- zap_ro_mpt(cr3_gfn);
+ zap_ro_mpt(_mfn(cr3_gfn));
break;
}
}
l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
}
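/*
 * fill_ro_mpt(): map the L4 table at @mfn and, if its RO_MPT_VIRT_START
 * slot is empty, install the read-only machine-to-phys (M2P) mapping
 * there; returns whether the slot was written.
 */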
-bool fill_ro_mpt(unsigned long mfn)
+bool fill_ro_mpt(mfn_t mfn)
{
- l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
+ l4_pgentry_t *l4tab = map_domain_page(mfn);
    bool ret = false;

    if ( !l4e_get_intpte(l4tab[l4_table_offset(RO_MPT_VIRT_START)]) )
    {
        l4tab[l4_table_offset(RO_MPT_VIRT_START)] =
            idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
        ret = true;
    }
    unmap_domain_page(l4tab);

    return ret;
}
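/*
 * zap_ro_mpt(): clear the read-only M2P slot (RO_MPT_VIRT_START) in the
 * L4 table at @mfn.
 */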
-void zap_ro_mpt(unsigned long mfn)
+void zap_ro_mpt(mfn_t mfn)
{
- l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
+ l4_pgentry_t *l4tab = map_domain_page(mfn);
l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
    unmap_domain_page(l4tab);
}
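/*
 * Hunk below (kernel page-table switch): the local mfn is still a raw
 * unsigned long, so it is wrapped with _mfn() at the call site.
 */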
invalidate_shadow_ldt(curr, 0);
if ( !VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
- fill_ro_mpt(mfn);
+ fill_ro_mpt(_mfn(mfn));
curr->arch.guest_table = pagetable_from_pfn(mfn);
update_cr3(curr);
}
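/*
 * Hunk below (user-mode base pointer update, guest_table_user):
 * op.arg1.mfn is a raw MFN supplied by the guest, hence the _mfn() wrapper.
 */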
if ( VM_ASSIST(currd, m2p_strict) )
- zap_ro_mpt(op.arg1.mfn);
+ zap_ro_mpt(_mfn(op.arg1.mfn));
}
curr->arch.guest_table_user = pagetable_from_pfn(op.arg1.mfn);
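/*
 * Hunk below (shadow paging): shadow_table[0] already yields an mfn_t via
 * pagetable_get_mfn(), so the mfn_x() conversion is simply dropped.
 */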
mfn_t smfn = pagetable_get_mfn(v->arch.shadow_table[0]);
if ( !(v->arch.flags & TF_kernel_mode) && VM_ASSIST(d, m2p_strict) )
- zap_ro_mpt(mfn_x(smfn));
+ zap_ro_mpt(smfn);
else if ( (v->arch.flags & TF_kernel_mode) &&
!VM_ASSIST(d, m2p_strict) )
- fill_ro_mpt(mfn_x(smfn));
+ fill_ro_mpt(smfn);
}
#else
#error This should never happen
void init_guest_l4_table(l4_pgentry_t[], const struct domain *,
bool_t zap_ro_mpt);
-bool_t fill_ro_mpt(unsigned long mfn);
-void zap_ro_mpt(unsigned long mfn);
+bool fill_ro_mpt(mfn_t mfn);
+void zap_ro_mpt(mfn_t mfn);
bool is_iomem_page(mfn_t mfn);