{
struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
struct page_info *pg;
- void *p;
- int i;
+ unsigned int i;
memset(s, 0, sizeof(*s));
spin_lock_init(&s->lock);
if ( pg == NULL )
break;
s->vram_page[i] = pg;
- p = __map_domain_page(pg);
- clear_page(p);
- unmap_domain_page(p);
+ clear_domain_page(_mfn(page_to_mfn(pg)));
}
if ( i == ARRAY_SIZE(s->vram_page) )
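Each hunk in this patch replaces an open-coded map/clear/unmap sequence with clear_domain_page(). As a minimal sketch of the assumed helper semantics (not quoted from the tree), the call above is taken to be equivalent to:

    /* Sketch only: assumed behaviour of clear_domain_page(). */
    void clear_domain_page(mfn_t mfn)
    {
        void *ptr = map_domain_page(mfn);   /* transient mapping of the frame */

        clear_page(ptr);                    /* zero the whole page */
        unmap_domain_page(ptr);             /* drop the transient mapping */
    }

The _mfn(page_to_mfn(pg)) wrapping is presumably needed because page_to_mfn() at this call site yields a raw frame number, while clear_domain_page() takes a typesafe mfn_t.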
if ( cpu_has_vmx_vmcs_shadowing )
{
struct page_info *vmread_bitmap, *vmwrite_bitmap;
- unsigned long *vr, *vw;
+ unsigned long *vw;
vmread_bitmap = alloc_domheap_page(NULL, 0);
if ( !vmread_bitmap )
}
v->arch.hvm_vmx.vmread_bitmap = vmread_bitmap;
+ clear_domain_page(_mfn(page_to_mfn(vmread_bitmap)));
+
vmwrite_bitmap = alloc_domheap_page(NULL, 0);
if ( !vmwrite_bitmap )
{
}
v->arch.hvm_vmx.vmwrite_bitmap = vmwrite_bitmap;
- vr = __map_domain_page(vmread_bitmap);
vw = __map_domain_page(vmwrite_bitmap);
-
- clear_page(vr);
clear_page(vw);
/*
set_bit(IO_BITMAP_B, vw);
set_bit(VMCS_HIGH(IO_BITMAP_B), vw);
- unmap_domain_page(vr);
unmap_domain_page(vw);
}
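In this hunk only the vmread bitmap switches to the helper: it is merely zeroed, whereas the vmwrite bitmap is zeroed and then has bits set, so it keeps an explicit mapping. A sketch of the two idioms, assuming the helpers named above (init_bitmaps_sketch is a made-up name for illustration):

    /* Illustrative sketch only; the function name is hypothetical. */
    static void init_bitmaps_sketch(struct page_info *rd, struct page_info *wr)
    {
        unsigned long *vw;

        /* Zero-only page: no explicit mapping required any more. */
        clear_domain_page(_mfn(page_to_mfn(rd)));

        /* Zeroed and then modified: keep one mapping for both steps. */
        vw = __map_domain_page(wr);
        clear_page(vw);
        set_bit(IO_BITMAP_B, vw);
        unmap_domain_page(vw);
    }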
{
struct page_info *top, *pg;
struct domain *d = p2m->domain;
- void *p;
+ mfn_t mfn;
p2m_lock(p2m);
p2m->np2m_base = P2M_BASE_EADDR;
/* Zap the top level of the trie */
- top = mfn_to_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
- p = __map_domain_page(top);
- clear_page(p);
- unmap_domain_page(p);
+ mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
+ clear_domain_page(mfn);
/* Make sure nobody else is using this p2m table */
nestedhvm_vmcx_flushtlb(p2m);
/* Free the rest of the trie pages back to the paging pool */
+ top = mfn_to_page(mfn);
while ( (pg = page_list_remove_head(&p2m->pages)) )
if ( pg != top )
d->arch.paging.free_page(d, pg);
static mfn_t paging_new_log_dirty_leaf(struct domain *d)
{
mfn_t mfn = paging_new_log_dirty_page(d);
+
if ( mfn_valid(mfn) )
- {
- void *leaf = map_domain_page(mfn);
- clear_page(leaf);
- unmap_domain_page(leaf);
- }
+ clear_domain_page(mfn);
+
return mfn;
}
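paging_new_log_dirty_leaf() now clears the page only when the returned MFN is valid, so a failed allocation is passed back untouched. A hypothetical caller (names and error handling invented for illustration) would still need to re-check the result:

    /* Hypothetical usage sketch; the error handling shown is illustrative. */
    static int alloc_leaf_example(struct domain *d, mfn_t *out)
    {
        mfn_t leaf = paging_new_log_dirty_leaf(d);

        if ( !mfn_valid(leaf) )
            return -ENOMEM;      /* allocation failed; nothing was cleared */

        *out = leaf;             /* leaf is guaranteed to be zeroed */
        return 0;
    }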
unsigned int pages = shadow_size(shadow_type);
struct page_list_head tmp_list;
cpumask_t mask;
- void *p;
- int i;
+ unsigned int i;
ASSERT(paging_locked_by_me(d));
ASSERT(shadow_type != SH_type_none);
flush_tlb_mask(&mask);
}
/* Now safe to clear the page for reuse */
- p = __map_domain_page(sp);
- ASSERT(p != NULL);
- clear_page(p);
- unmap_domain_page(p);
+ clear_domain_page(page_to_mfn(sp));
INIT_PAGE_LIST_ENTRY(&sp->list);
page_list_add(sp, &tmp_list);
sp->u.sh.type = shadow_type;
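Note the missing _mfn() wrapper in this hunk compared with the others: this is shadow code, where page_to_mfn() is assumed to be locally overridden to hand back a typesafe mfn_t, roughly along these lines (an assumption, not quoted from the tree):

    /* Assumed local override in the shadow code (sketch only): */
    #undef page_to_mfn
    #define page_to_mfn(pg) _mfn(__page_to_mfn(pg))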
void scrub_one_page(struct page_info *pg)
{
- void *p;
-
if ( unlikely(pg->count_info & PGC_broken) )
return;
- p = __map_domain_page(pg);
-
#ifndef NDEBUG
/* Avoid callers relying on allocations returning zeroed pages. */
- memset(p, 0xc2, PAGE_SIZE);
+ unmap_domain_page(memset(__map_domain_page(pg), 0xc2, PAGE_SIZE));
#else
/* For a production build, clear_page() is the fastest way to scrub. */
- clear_page(p);
+ clear_domain_page(_mfn(page_to_mfn(pg)));
#endif
-
- unmap_domain_page(p);
}
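In scrub_one_page(), the debug build keeps an explicit mapping because it writes a poison pattern rather than zeroes; the chained one-liner relies on memset() returning its destination pointer. Expanded for readability (equivalent, not a further change):

    /* Equivalent expansion of the #ifndef NDEBUG branch above. */
    void *p = __map_domain_page(pg);

    memset(p, 0xc2, PAGE_SIZE);   /* poison, so callers can't rely on zeroes */
    unmap_domain_page(p);         /* memset() returns p, hence the one-liner */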
static void dump_heap(unsigned char key)