return;
}
- dst = map_domain_page(ma>>PAGE_SHIFT);
+ dst = map_domain_page(_mfn(paddr_to_pfn(ma)));
copy_from_paddr(dst + s, paddr + offs, l);
return;
}
- dst = map_domain_page(ma>>PAGE_SHIFT);
+ dst = map_domain_page(_mfn(paddr_to_pfn(ma)));
copy_from_paddr(dst + s, paddr + offs, l);
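/*
 * Conversion pattern used here and below: an open-coded frame number such
 * as "ma >> PAGE_SHIFT" becomes paddr_to_pfn(ma), wrapped in _mfn() so that
 * map_domain_page() receives a typesafe mfn_t. A minimal sketch of the
 * helpers involved (the typesafe wrapper follows the usual TYPE_SAFE
 * pattern; this is an illustration, not a quote of the tree):
 *
 *     #define paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
 *     typedef struct { unsigned long mfn; } mfn_t;
 *     #define _mfn(n)           ((mfn_t){ .mfn = (n) })
 *     #define mfn_x(m)          ((m).mfn)
 *
 * so map_domain_page(_mfn(paddr_to_pfn(ma))) maps exactly the frame that
 * map_domain_page(ma >> PAGE_SHIFT) mapped before.
 */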
else
root_table = 0;
- mapping = map_domain_page(root_pfn + root_table);
+ mapping = map_domain_page(_mfn(root_pfn + root_table));
for ( level = root_level; ; level++ )
{
/* For next iteration */
unmap_domain_page(mapping);
- mapping = map_domain_page(pte.walk.base);
+ mapping = map_domain_page(_mfn(pte.walk.base));
}
unmap_domain_page(mapping);
}
/* Map a page of domheap memory */
-void *map_domain_page(unsigned long mfn)
+void *map_domain_page(mfn_t mfn)
{
unsigned long flags;
lpae_t *map = this_cpu(xen_dommap);
- unsigned long slot_mfn = mfn & ~LPAE_ENTRY_MASK;
+ unsigned long slot_mfn = mfn_x(mfn) & ~LPAE_ENTRY_MASK;
vaddr_t va;
lpae_t pte;
int i, slot;
va = (DOMHEAP_VIRT_START
+ (slot << SECOND_SHIFT)
- + ((mfn & LPAE_ENTRY_MASK) << THIRD_SHIFT));
+ + ((mfn_x(mfn) & LPAE_ENTRY_MASK) << THIRD_SHIFT));
/*
* We may not have flushed this specific subpage at map time,
void flush_page_to_ram(unsigned long mfn)
{
- void *v = map_domain_page(mfn);
+ void *v = map_domain_page(_mfn(mfn));
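/* The cache maintenance below operates on virtual addresses, so the frame
 * is given a transient domheap mapping purely to obtain a VA to clean and
 * invalidate. */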
clean_and_invalidate_dcache_va_range(v, PAGE_SIZE);
unmap_domain_page(v);
/* Map for next level */
unmap_domain_page(map);
- map = map_domain_page(pte.p2m.base);
+ map = map_domain_page(_mfn(pte.p2m.base));
}
unmap_domain_page(map);
int i;
if ( mappings[level+1] )
unmap_domain_page(mappings[level+1]);
- mappings[level+1] = map_domain_page(entry->p2m.base);
+ mappings[level+1] = map_domain_page(_mfn(entry->p2m.base));
cur_offset[level] = offset;
/* Any mapping further down is now invalid */
for ( i = level+1; i < 4; i++ )
printk("Failed TTBR0 maddr lookup\n");
goto done;
}
- first = map_domain_page(paddr>>PAGE_SHIFT);
+ first = map_domain_page(_mfn(paddr_to_pfn(paddr)));
offset = addr >> (12+10);
printk("1ST[0x%"PRIx32"] (0x%"PRIpaddr") = 0x%08"PRIx32"\n",
printk("Failed L1 entry maddr lookup\n");
goto done;
}
- second = map_domain_page(paddr>>PAGE_SHIFT);
+ second = map_domain_page(_mfn(paddr_to_pfn(paddr)));
offset = (addr >> 12) & 0x3FF;
printk("2ND[0x%"PRIx32"] (0x%"PRIpaddr") = 0x%08"PRIx32"\n",
offset, paddr, second[offset]);
if ( pgd3val == 0 )
{
- l4t = map_domain_page(mfn);
+ l4t = map_domain_page(_mfn(mfn));
l4e = l4t[l4_table_offset(vaddr)];
unmap_domain_page(l4t);
mfn = l4e_get_pfn(l4e);
return INVALID_MFN;
}
- l3t = map_domain_page(mfn);
+ l3t = map_domain_page(_mfn(mfn));
l3e = l3t[l3_table_offset(vaddr)];
unmap_domain_page(l3t);
mfn = l3e_get_pfn(l3e);
}
}
- l2t = map_domain_page(mfn);
+ l2t = map_domain_page(_mfn(mfn));
l2e = l2t[l2_table_offset(vaddr)];
unmap_domain_page(l2t);
mfn = l2e_get_pfn(l2e);
DBGP1("l2 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
return INVALID_MFN;
}
- l1t = map_domain_page(mfn);
+ l1t = map_domain_page(_mfn(mfn));
l1e = l1t[l1_table_offset(vaddr)];
unmap_domain_page(l1t);
mfn = l1e_get_pfn(l1e);
if ( mfn == INVALID_MFN )
break;
- va = map_domain_page(mfn);
+ va = map_domain_page(_mfn(mfn));
va = va + (addr & (PAGE_SIZE-1));
if ( toaddr )
fail |= xen_pfn_to_cr3(pfn) != c.nat->ctrlreg[1];
}
} else {
- l4_pgentry_t *l4tab = map_domain_page(pfn);
+ l4_pgentry_t *l4tab = map_domain_page(_mfn(pfn));
pfn = l4e_get_pfn(*l4tab);
unmap_domain_page(l4tab);
{
l4_pgentry_t *l4tab;
- l4tab = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+ l4tab = map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
*l4tab = l4e_from_pfn(page_to_mfn(cr3_page),
_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
unmap_domain_page(l4tab);
ASSERT(paging_mode_enabled(v->domain));
- l4start = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+ l4start = map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
/* Clear entries prior to guest L4 start */
pl4e = l4start + l4_table_offset(v_start);
unsigned long nr_pages)
{
struct page_info *page = NULL;
- l4_pgentry_t *pl4e, *l4start = map_domain_page(pgtbl_pfn);
+ l4_pgentry_t *pl4e, *l4start = map_domain_page(_mfn(pgtbl_pfn));
l3_pgentry_t *pl3e = NULL;
l2_pgentry_t *pl2e = NULL;
l1_pgentry_t *pl1e = NULL;
clear_page(pl3e);
*pl4e = l4e_from_page(page, L4_PROT);
} else
- pl3e = map_domain_page(l4e_get_pfn(*pl4e));
+ pl3e = map_domain_page(_mfn(l4e_get_pfn(*pl4e)));
pl3e += l3_table_offset(vphysmap_start);
if ( !l3e_get_intpte(*pl3e) )
*pl3e = l3e_from_page(page, L3_PROT);
}
else
- pl2e = map_domain_page(l3e_get_pfn(*pl3e));
+ pl2e = map_domain_page(_mfn(l3e_get_pfn(*pl3e)));
pl2e += l2_table_offset(vphysmap_start);
if ( !l2e_get_intpte(*pl2e) )
*pl2e = l2e_from_page(page, L2_PROT);
}
else
- pl1e = map_domain_page(l2e_get_pfn(*pl2e));
+ pl1e = map_domain_page(_mfn(l2e_get_pfn(*pl2e)));
pl1e += l1_table_offset(vphysmap_start);
BUG_ON(l1e_get_intpte(*pl1e));
#define MAPCACHE_L1ENT(idx) \
__linear_l1_table[l1_linear_offset(MAPCACHE_VIRT_START + pfn_to_paddr(idx))]
-void *map_domain_page(unsigned long mfn)
+void *map_domain_page(mfn_t mfn)
{
unsigned long flags;
unsigned int idx, i;
struct vcpu_maphash_entry *hashent;
#ifdef NDEBUG
- if ( mfn <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
- return mfn_to_virt(mfn);
+ if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
+ return mfn_to_virt(mfn_x(mfn));
#endif
v = mapcache_current_vcpu();
if ( !v || !is_pv_vcpu(v) )
- return mfn_to_virt(mfn);
+ return mfn_to_virt(mfn_x(mfn));
dcache = &v->domain->arch.pv_domain.mapcache;
vcache = &v->arch.pv_vcpu.mapcache;
if ( !dcache->inuse )
- return mfn_to_virt(mfn);
+ return mfn_to_virt(mfn_x(mfn));
perfc_incr(map_domain_page_count);
local_irq_save(flags);
- hashent = &vcache->hash[MAPHASH_HASHFN(mfn)];
- if ( hashent->mfn == mfn )
+ hashent = &vcache->hash[MAPHASH_HASHFN(mfn_x(mfn))];
+ if ( hashent->mfn == mfn_x(mfn) )
{
idx = hashent->idx;
ASSERT(idx < dcache->entries);
hashent->refcnt++;
ASSERT(hashent->refcnt);
- ASSERT(l1e_get_pfn(MAPCACHE_L1ENT(idx)) == mfn);
+ ASSERT(l1e_get_pfn(MAPCACHE_L1ENT(idx)) == mfn_x(mfn));
goto out;
}
else
{
/* Replace a hash entry instead. */
- i = MAPHASH_HASHFN(mfn);
+ i = MAPHASH_HASHFN(mfn_x(mfn));
do {
hashent = &vcache->hash[i];
if ( hashent->idx != MAPHASHENT_NOTINUSE && !hashent->refcnt )
}
if ( ++i == MAPHASH_ENTRIES )
i = 0;
- } while ( i != MAPHASH_HASHFN(mfn) );
+ } while ( i != MAPHASH_HASHFN(mfn_x(mfn)) );
}
BUG_ON(idx >= dcache->entries);
spin_unlock(&dcache->lock);
- l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_pfn(mfn, __PAGE_HYPERVISOR_RW));
+ l1e_write(&MAPCACHE_L1ENT(idx),
+           l1e_from_pfn(mfn_x(mfn), __PAGE_HYPERVISOR_RW));
out:
local_irq_restore(flags);
else
{
const l4_pgentry_t *l4e =
- map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+ map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));
unmap_domain_page(l4e);
unsigned int i;
int ret = 0;
- pl1e = map_domain_page(pfn);
+ pl1e = map_domain_page(_mfn(pfn));
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
{
unsigned int i;
int rc = 0;
- pl2e = map_domain_page(pfn);
+ pl2e = map_domain_page(_mfn(pfn));
for ( i = page->nr_validated_ptes; i < L2_PAGETABLE_ENTRIES; i++ )
{
unsigned int i;
int rc = 0, partial = page->partial_pte;
- pl3e = map_domain_page(pfn);
+ pl3e = map_domain_page(_mfn(pfn));
/*
* PAE guests allocate full pages, but aren't required to initialize
void fill_ro_mpt(unsigned long mfn)
{
- l4_pgentry_t *l4tab = map_domain_page(mfn);
+ l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
l4tab[l4_table_offset(RO_MPT_VIRT_START)] =
idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
void zap_ro_mpt(unsigned long mfn)
{
- l4_pgentry_t *l4tab = map_domain_page(mfn);
+ l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
unmap_domain_page(l4tab);
{
struct domain *d = page_get_owner(page);
unsigned long pfn = page_to_mfn(page);
- l4_pgentry_t *pl4e = map_domain_page(pfn);
+ l4_pgentry_t *pl4e = map_domain_page(_mfn(pfn));
unsigned int i;
int rc = 0, partial = page->partial_pte;
l1_pgentry_t *pl1e;
unsigned int i;
- pl1e = map_domain_page(pfn);
+ pl1e = map_domain_page(_mfn(pfn));
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
if ( is_guest_l1_slot(i) )
unsigned int i = page->nr_validated_ptes - 1;
int err = 0;
- pl2e = map_domain_page(pfn);
+ pl2e = map_domain_page(_mfn(pfn));
ASSERT(page->nr_validated_ptes);
do {
int rc = 0, partial = page->partial_pte;
unsigned int i = page->nr_validated_ptes - !partial;
- pl3e = map_domain_page(pfn);
+ pl3e = map_domain_page(_mfn(pfn));
do {
if ( is_guest_l3_slot(i) )
{
struct domain *d = page_get_owner(page);
unsigned long pfn = page_to_mfn(page);
- l4_pgentry_t *pl4e = map_domain_page(pfn);
+ l4_pgentry_t *pl4e = map_domain_page(_mfn(pfn));
int rc = 0, partial = page->partial_pte;
unsigned int i = page->nr_validated_ptes - !partial;
if ( is_pv_32bit_vcpu(v) )
{
- l4tab = map_domain_page(mfn);
+ l4tab = map_domain_page(_mfn(mfn));
mfn = l4e_get_pfn(*l4tab);
}
if ( is_pv_32bit_domain(d) )
{
unsigned long gt_mfn = pagetable_get_pfn(curr->arch.guest_table);
- l4_pgentry_t *pl4e = map_domain_page(gt_mfn);
+ l4_pgentry_t *pl4e = map_domain_page(_mfn(gt_mfn));
rc = paging_mode_refcounts(d)
? -EINVAL /* Old code was broken, but what should it be? */
}
mfn = page_to_mfn(page);
- va = map_domain_page(mfn);
+ va = map_domain_page(_mfn(mfn));
va = (void *)((unsigned long)va + ((unsigned long)pte_addr & ~PAGE_MASK));
if ( !page_lock(page) )
}
mfn = page_to_mfn(page);
- va = map_domain_page(mfn);
+ va = map_domain_page(_mfn(mfn));
va = (void *)((unsigned long)va + ((unsigned long)addr & ~PAGE_MASK));
if ( !page_lock(page) )
paging_mark_dirty(dom, mfn);
/* All is good so make the update. */
- gdt_pent = map_domain_page(mfn);
+ gdt_pent = map_domain_page(_mfn(mfn));
write_atomic((uint64_t *)&gdt_pent[offset], *(uint64_t *)&d);
unmap_domain_page(gdt_pent);
adjust_guest_l1e(nl1e, d);
/* Checked successfully: do the update (write or cmpxchg). */
- pl1e = map_domain_page(mfn);
+ pl1e = map_domain_page(_mfn(mfn));
pl1e = (l1_pgentry_t *)((unsigned long)pl1e + (addr & ~PAGE_MASK));
if ( do_cmpxchg )
{
l3tab[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR);
}
else
- l2tab = map_domain_page(l3e_get_pfn(l3tab[l3_table_offset(va)]));
+ l2tab = map_domain_page(_mfn(l3e_get_pfn(l3tab[l3_table_offset(va)])));
unmap_domain_page(l3tab);
*pl2e = l2e_from_page(pg, __PAGE_HYPERVISOR);
}
else if ( !l1tab )
- l1tab = map_domain_page(l2e_get_pfn(*pl2e));
+ l1tab = map_domain_page(_mfn(l2e_get_pfn(*pl2e)));
if ( ppg &&
!(l1e_get_flags(l1tab[l1_table_offset(va)]) & _PAGE_PRESENT) )
if ( l3e_get_flags(*pl3e) & _PAGE_PRESENT )
{
- const l2_pgentry_t *l2tab = map_domain_page(l3e_get_pfn(*pl3e));
+ const l2_pgentry_t *l2tab = map_domain_page(_mfn(l3e_get_pfn(*pl3e)));
const l2_pgentry_t *pl2e = l2tab + l2_table_offset(va);
unsigned int i = l1_table_offset(va);
{
if ( l2e_get_flags(*pl2e) & _PAGE_PRESENT )
{
- l1_pgentry_t *l1tab = map_domain_page(l2e_get_pfn(*pl2e));
+ l1_pgentry_t *l1tab = map_domain_page(_mfn(l2e_get_pfn(*pl2e)));
for ( ; nr && i < L1_PAGETABLE_ENTRIES; --nr, ++i )
{
*mfn = _mfn(page_to_mfn(page));
ASSERT(mfn_valid(mfn_x(*mfn)));
- map = map_domain_page(mfn_x(*mfn));
+ map = map_domain_page(*mfn);
return map;
}
/* Map the top-level table and call the tree-walker */
ASSERT(mfn_valid(mfn_x(top_mfn)));
- top_map = map_domain_page(mfn_x(top_mfn));
+ top_map = map_domain_page(top_mfn);
#if GUEST_PAGING_LEVELS == 3
top_map += (cr3 & ~(PAGE_MASK | 31));
#endif
return -ENOMEM;
}
- s = map_domain_page(__page_to_mfn(old_page));
- t = map_domain_page(__page_to_mfn(page));
+ s = map_domain_page(_mfn(__page_to_mfn(old_page)));
+ t = map_domain_page(_mfn(__page_to_mfn(page)));
memcpy(t, s, PAGE_SIZE);
unmap_domain_page(s);
unmap_domain_page(t);
if ( level > 1 )
{
- ept_entry_t *epte = map_domain_page(ept_entry->mfn);
+ ept_entry_t *epte = map_domain_page(_mfn(ept_entry->mfn));
for ( int i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
ept_free_entry(p2m, epte + i, level - 1);
unmap_domain_page(epte);
if ( !ept_set_middle_entry(p2m, &new_ept) )
return 0;
- table = map_domain_page(new_ept.mfn);
+ table = map_domain_page(_mfn(new_ept.mfn));
trunk = 1UL << ((level - 1) * EPT_TABLE_ORDER);
for ( int i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
mfn = e.mfn;
unmap_domain_page(*table);
- *table = map_domain_page(mfn);
+ *table = map_domain_page(_mfn(mfn));
*gfn_remainder &= (1UL << shift) - 1;
return GUEST_TABLE_NORMAL_PAGE;
}
static bool_t ept_invalidate_emt(mfn_t mfn, bool_t recalc, int level)
{
int rc;
- ept_entry_t *epte = map_domain_page(mfn_x(mfn));
+ ept_entry_t *epte = map_domain_page(mfn);
unsigned int i;
bool_t changed = 0;
unsigned int i, index;
int wrc, rc = 0, ret = GUEST_TABLE_MAP_FAILED;
- table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+ table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
for ( i = ept_get_wl(&p2m->ept); i > target; --i )
{
ret = ept_next_level(p2m, 1, &table, &gfn_remainder, i);
ept_entry_t e;
unsigned int i;
- epte = map_domain_page(mfn);
+ epte = map_domain_page(_mfn(mfn));
i = (gfn >> (level * EPT_TABLE_ORDER)) & (EPT_PAGETABLE_ENTRIES - 1);
e = atomic_read_ept_entry(&epte[i]);
(target == 0));
ASSERT(!p2m_is_foreign(p2mt) || target == 0);
- table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+ table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
ret = GUEST_TABLE_MAP_FAILED;
for ( i = ept_get_wl(ept); i > target; i-- )
unsigned long gfn, p2m_type_t *t, p2m_access_t* a,
p2m_query_t q, unsigned int *page_order)
{
- ept_entry_t *table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+ ept_entry_t *table =
+     map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
unsigned long gfn_remainder = gfn;
ept_entry_t *ept_entry;
u32 index;
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
struct ept_data *ept = &p2m->ept;
- ept_entry_t *table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+ ept_entry_t *table =
+     map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
unsigned long gfn_remainder = gfn;
int i;
{
gfn_remainder &= (1UL << (i*EPT_TABLE_ORDER)) - 1;
- next = map_domain_page(ept_entry->mfn);
+ next = map_domain_page(_mfn(ept_entry->mfn));
unmap_domain_page(table);
char c = 0;
gfn_remainder = gfn;
- table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+ table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
for ( i = ept_get_wl(ept); i > 0; i-- )
{
*/
for ( i = 0; i < (1 << order); i++ )
{
- char *b = map_domain_page(mfn_x(page_to_mfn(page)) + i);
+ char *b = map_domain_page(_mfn(mfn_x(page_to_mfn(page)) + i));
clear_page(b);
unmap_domain_page(b);
}
for ( i=0; i<SUPERPAGE_PAGES; i++ )
{
/* Quick zero-check */
- map = map_domain_page(mfn_x(mfn0) + i);
+ map = map_domain_page(_mfn(mfn_x(mfn0) + i));
for ( j=0; j<16; j++ )
if( *(map+j) != 0 )
/* Finally, do a full zero-check */
for ( i=0; i < SUPERPAGE_PAGES; i++ )
{
- map = map_domain_page(mfn_x(mfn0) + i);
+ map = map_domain_page(_mfn(mfn_x(mfn0) + i));
for ( j=0; j<PAGE_SIZE/sizeof(*map); j++ )
if( *(map+j) != 0 )
&& ( (mfn_to_page(mfns[i])->count_info & PGC_allocated) != 0 )
&& ( (mfn_to_page(mfns[i])->count_info & (PGC_page_table|PGC_xen_heap)) == 0 )
&& ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) <= max_ref ) )
- map[i] = map_domain_page(mfn_x(mfns[i]));
+ map[i] = map_domain_page(mfns[i]);
else
map[i] = NULL;
}
if ( page_order > PAGE_ORDER_2M )
{
- l1_pgentry_t *l3_table = map_domain_page(l1e_get_pfn(*p2m_entry));
+ l1_pgentry_t *l3_table = map_domain_page(_mfn(l1e_get_pfn(*p2m_entry)));
for ( int i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
p2m_free_entry(p2m, l3_table + i, page_order - 9);
unmap_domain_page(l3_table);
p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
}
- next = map_domain_page(l1e_get_pfn(*p2m_entry));
+ next = map_domain_page(_mfn(l1e_get_pfn(*p2m_entry)));
if ( unmap )
unmap_domain_page(*table);
*table = next;
l1_pgentry_t *pent, *plast;
int err = 0;
- table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+ table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
for ( i = 4; i-- > level; )
{
remainder = gfn_remainder;
l1_pgentry_t *pent;
int err = 0;
- table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+ table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
while ( --level )
{
unsigned long remainder = gfn_remainder;
if ( rc < 0 )
return rc;
- table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+ table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
L4_PAGETABLE_SHIFT - PAGE_SHIFT,
L4_PAGETABLE_ENTRIES, PGT_l3_page_table, 1);
mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
{
- l4_pgentry_t *l4e = map_domain_page(mfn_x(mfn));
+ l4_pgentry_t *l4e = map_domain_page(mfn);
l4e += l4_table_offset(addr);
if ( (l4e_get_flags(*l4e) & _PAGE_PRESENT) == 0 )
{
unmap_domain_page(l4e);
}
{
- l3_pgentry_t *l3e = map_domain_page(mfn_x(mfn));
+ l3_pgentry_t *l3e = map_domain_page(mfn);
l3e += l3_table_offset(addr);
pod_retry_l3:
flags = l3e_get_flags(*l3e);
unmap_domain_page(l3e);
}
- l2e = map_domain_page(mfn_x(mfn));
+ l2e = map_domain_page(mfn);
l2e += l2_table_offset(addr);
pod_retry_l2:
recalc = 1;
unmap_domain_page(l2e);
- l1e = map_domain_page(mfn_x(mfn));
+ l1e = map_domain_page(mfn);
l1e += l1_table_offset(addr);
pod_retry_l1:
flags = l1e_get_flags(*l1e);
ASSERT(hap_enabled(p2m->domain));
- tab = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+ tab = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
for ( changed = i = 0; i < (1 << PAGETABLE_ORDER); ++i )
{
l1_pgentry_t e = tab[i];
l4_pgentry_t *l4e;
l3_pgentry_t *l3e;
int i4, i3;
- l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+ l4e = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
gfn = 0;
for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
gfn += 1 << (L4_PAGETABLE_SHIFT - PAGE_SHIFT);
continue;
}
- l3e = map_domain_page(l4e_get_pfn(l4e[i4]));
+ l3e = map_domain_page(_mfn(l4e_get_pfn(l4e[i4])));
for ( i3 = 0;
i3 < L3_PAGETABLE_ENTRIES;
i3++ )
}
}
- l2e = map_domain_page(l3e_get_pfn(l3e[i3]));
+ l2e = map_domain_page(_mfn(l3e_get_pfn(l3e[i3])));
for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
{
if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
continue;
}
- l1e = map_domain_page(l2e_get_pfn(l2e[i2]));
+ l1e = map_domain_page(_mfn(l2e_get_pfn(l2e[i2])));
for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
{
int rc;
ASSERT( mfn_valid(mfn) );
- guest_map = map_domain_page(mfn_x(mfn));
+ guest_map = map_domain_page(mfn);
rc = copy_from_user(guest_map, user_ptr, PAGE_SIZE);
unmap_domain_page(guest_map);
if ( rc )
mfn_t mfn = paging_new_log_dirty_page(d);
if ( mfn_valid(mfn) )
{
- void *leaf = map_domain_page(mfn_x(mfn));
+ void *leaf = map_domain_page(mfn);
clear_page(leaf);
unmap_domain_page(leaf);
}
if ( mfn_valid(mfn) )
{
int i;
- mfn_t *node = map_domain_page(mfn_x(mfn));
+ mfn_t *node = map_domain_page(mfn);
for ( i = 0; i < LOGDIRTY_NODE_ENTRIES; i++ )
node[i] = _mfn(INVALID_MFN);
unmap_domain_page(node);
static mfn_t *paging_map_log_dirty_bitmap(struct domain *d)
{
if ( likely(mfn_valid(d->arch.paging.log_dirty.top)) )
- return map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
+ return map_domain_page(d->arch.paging.log_dirty.top);
return NULL;
}
return -EBUSY;
}
- l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
+ l4 = map_domain_page(d->arch.paging.log_dirty.top);
i4 = d->arch.paging.preempt.log_dirty.i4;
i3 = d->arch.paging.preempt.log_dirty.i3;
rc = 0;
if ( !mfn_valid(l4[i4]) )
continue;
- l3 = map_domain_page(mfn_x(l4[i4]));
+ l3 = map_domain_page(l4[i4]);
for ( ; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
{
if ( !mfn_valid(l3[i3]) )
continue;
- l2 = map_domain_page(mfn_x(l3[i3]));
+ l2 = map_domain_page(l3[i3]);
for ( i2 = 0; i2 < LOGDIRTY_NODE_ENTRIES; i2++ )
if ( mfn_valid(l2[i2]) )
if ( !mfn_valid(mfn) )
goto out;
- l3 = map_domain_page(mfn_x(mfn));
+ l3 = map_domain_page(mfn);
mfn = l3[i3];
if ( !mfn_valid(mfn) )
l3[i3] = mfn = paging_new_log_dirty_node(d);
if ( !mfn_valid(mfn) )
goto out;
- l2 = map_domain_page(mfn_x(mfn));
+ l2 = map_domain_page(mfn);
mfn = l2[i2];
if ( !mfn_valid(mfn) )
l2[i2] = mfn = paging_new_log_dirty_leaf(d);
if ( !mfn_valid(mfn) )
goto out;
- l1 = map_domain_page(mfn_x(mfn));
+ l1 = map_domain_page(mfn);
changed = !__test_and_set_bit(i1, l1);
unmap_domain_page(l1);
if ( changed )
if ( !mfn_valid(mfn) )
return 0;
- l4 = map_domain_page(mfn_x(mfn));
+ l4 = map_domain_page(mfn);
mfn = l4[L4_LOGDIRTY_IDX(pfn)];
unmap_domain_page(l4);
if ( !mfn_valid(mfn) )
return 0;
- l3 = map_domain_page(mfn_x(mfn));
+ l3 = map_domain_page(mfn);
mfn = l3[L3_LOGDIRTY_IDX(pfn)];
unmap_domain_page(l3);
if ( !mfn_valid(mfn) )
return 0;
- l2 = map_domain_page(mfn_x(mfn));
+ l2 = map_domain_page(mfn);
mfn = l2[L2_LOGDIRTY_IDX(pfn)];
unmap_domain_page(l2);
if ( !mfn_valid(mfn) )
return 0;
- l1 = map_domain_page(mfn_x(mfn));
+ l1 = map_domain_page(mfn);
rv = test_bit(L1_LOGDIRTY_IDX(pfn), l1);
unmap_domain_page(l1);
return rv;
for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
{
- l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
+ l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(l4[i4]) : NULL;
for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
{
l2 = ((l3 && mfn_valid(l3[i3])) ?
- map_domain_page(mfn_x(l3[i3])) : NULL);
+ map_domain_page(l3[i3]) : NULL);
for ( i2 = 0;
(pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
i2++ )
{
unsigned int bytes = PAGE_SIZE;
l1 = ((l2 && mfn_valid(l2[i2])) ?
- map_domain_page(mfn_x(l2[i2])) : NULL);
+ map_domain_page(l2[i2]) : NULL);
if ( unlikely(((sc->pages - pages + 7) >> 3) < bytes) )
bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
if ( likely(peek) )
if ( (l1e_get_flags(new) & _PAGE_PRESENT)
&& !(l1e_get_flags(new) & _PAGE_PSE)
&& mfn_valid(nmfn) )
- npte = map_domain_page(mfn_x(nmfn));
+ npte = map_domain_page(nmfn);
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
{
return 0;
/* Can't just pull-through because mfn may have changed */
- l1p = map_domain_page(mfn_x(gw->l1mfn));
+ l1p = map_domain_page(gw->l1mfn);
nl1e.l1 = l1p[guest_l1_table_offset(gw->va)].l1;
unmap_domain_page(l1p);
{
if ( gl1mfn )
*gl1mfn = mfn_x(gw.l1mfn);
- pl1e = map_domain_page(mfn_x(gw.l1mfn)) +
+ pl1e = map_domain_page(gw.l1mfn) +
(guest_l1_table_offset(addr) * sizeof(guest_l1e_t));
}
static inline void *
sh_map_domain_page(mfn_t mfn)
{
- return map_domain_page(mfn_x(mfn));
+ return map_domain_page(mfn);
}
static inline void
if ( per_cpu(stubs.addr, cpu) )
{
unsigned long mfn = per_cpu(stubs.mfn, cpu);
- unsigned char *stub_page = map_domain_page(mfn);
+ unsigned char *stub_page = map_domain_page(_mfn(mfn));
unsigned int i;
memset(stub_page + STUB_BUF_CPU_OFFS(cpu), 0xcc, STUB_BUF_SIZE);
if ( pt_maddr == 0 )
return;
- pt_vaddr = (struct dma_pte *)map_domain_page(pt_maddr >> PAGE_SHIFT_4K);
+ pt_vaddr = (struct dma_pte *)map_domain_page(_mfn(paddr_to_pfn(pt_maddr)));
vmac_update((void *)pt_vaddr, PAGE_SIZE, ctx);
for ( i = 0; i < PTE_NUM; i++ )
{
if ( page->count_info & PGC_page_table )
{
- void *pg = map_domain_page(mfn);
+ void *pg = map_domain_page(_mfn(mfn));
+
vmac_update(pg, PAGE_SIZE, ctx);
unmap_domain_page(pg);
}
mfn = cr3 >> PAGE_SHIFT;
- l4t = map_domain_page(mfn);
+ l4t = map_domain_page(_mfn(mfn));
l4e = l4e_read_atomic(&l4t[l4_table_offset(addr)]);
mfn = l4e_get_pfn(l4e);
unmap_domain_page(l4t);
return real_fault;
page_user &= l4e_get_flags(l4e);
- l3t = map_domain_page(mfn);
+ l3t = map_domain_page(_mfn(mfn));
l3e = l3e_read_atomic(&l3t[l3_table_offset(addr)]);
mfn = l3e_get_pfn(l3e);
unmap_domain_page(l3t);
if ( l3e_get_flags(l3e) & _PAGE_PSE )
goto leaf;
- l2t = map_domain_page(mfn);
+ l2t = map_domain_page(_mfn(mfn));
l2e = l2e_read_atomic(&l2t[l2_table_offset(addr)]);
mfn = l2e_get_pfn(l2e);
unmap_domain_page(l2t);
if ( l2e_get_flags(l2e) & _PAGE_PSE )
goto leaf;
- l1t = map_domain_page(mfn);
+ l1t = map_domain_page(_mfn(mfn));
l1e = l1e_read_atomic(&l1t[l1_table_offset(addr)]);
mfn = l1e_get_pfn(l1e);
unmap_domain_page(l1t);
* context. This is needed for some systems which (ab)use IN/OUT
* to communicate with BIOS code in system-management mode.
*/
- io_emul_stub = map_domain_page(this_cpu(stubs.mfn)) +
+ io_emul_stub = map_domain_page(_mfn(this_cpu(stubs.mfn))) +
(this_cpu(stubs.addr) & ~PAGE_MASK) +
STUB_BUF_SIZE / 2;
/* movq $host_to_guest_gpr_switch,%rcx */
else
{
l4_pgentry_t *pl4e =
- map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+ map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
mfn = l4e_get_pfn(*pl4e);
unmap_domain_page(pl4e);
if ( !is_pv_vcpu(v) || !is_canonical_address(addr) )
return NULL;
- l4t = map_domain_page(mfn);
+ l4t = map_domain_page(_mfn(mfn));
l4e = l4t[l4_table_offset(addr)];
unmap_domain_page(l4t);
if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
goto ret;
}
- l2t = map_domain_page(mfn);
+ l2t = map_domain_page(_mfn(mfn));
l2e = l2t[l2_table_offset(addr)];
unmap_domain_page(l2t);
mfn = l2e_get_pfn(l2e);
goto ret;
}
- l1t = map_domain_page(mfn);
+ l1t = map_domain_page(_mfn(mfn));
l1e = l1t[l1_table_offset(addr)];
unmap_domain_page(l1t);
mfn = l1e_get_pfn(l1e);
return NULL;
ret:
- return map_domain_page(mfn) + (addr & ~PAGE_MASK);
+ return map_domain_page(_mfn(mfn)) + (addr & ~PAGE_MASK);
}
/*
mfn = (read_cr3()) >> PAGE_SHIFT;
- pl4e = map_domain_page(mfn);
+ pl4e = map_domain_page(_mfn(mfn));
l4e = pl4e[0];
mfn = l4e_get_pfn(l4e);
/* We don't need get page type here since it is current CR3 */
- pl3e = map_domain_page(mfn);
+ pl3e = map_domain_page(_mfn(mfn));
l3e = pl3e[3];
goto unmap;
mfn = l3e_get_pfn(l3e);
- pl2e = map_domain_page(mfn);
+ pl2e = map_domain_page(_mfn(mfn));
l2e = pl2e[l2_table_offset(addr)];
if ( !is_canonical_address(addr) )
return;
- l4t = map_domain_page(mfn);
+ l4t = map_domain_page(_mfn(mfn));
l4e = l4t[l4_table_offset(addr)];
unmap_domain_page(l4t);
mfn = l4e_get_pfn(l4e);
!mfn_valid(mfn) )
return;
- l3t = map_domain_page(mfn);
+ l3t = map_domain_page(_mfn(mfn));
l3e = l3t[l3_table_offset(addr)];
unmap_domain_page(l3t);
mfn = l3e_get_pfn(l3e);
!mfn_valid(mfn) )
return;
- l2t = map_domain_page(mfn);
+ l2t = map_domain_page(_mfn(mfn));
l2e = l2t[l2_table_offset(addr)];
unmap_domain_page(l2t);
mfn = l2e_get_pfn(l2e);
!mfn_valid(mfn) )
return;
- l1t = map_domain_page(mfn);
+ l1t = map_domain_page(_mfn(mfn));
l1e = l1t[l1_table_offset(addr)];
unmap_domain_page(l1t);
mfn = l1e_get_pfn(l1e);
/* IST_MAX IST pages + 1 syscall page + 1 guard page + primary stack. */
BUILD_BUG_ON((IST_MAX + 2) * PAGE_SIZE + PRIMARY_STACK_SIZE > STACK_SIZE);
- stub_page = map_domain_page(this_cpu(stubs.mfn));
+ stub_page = map_domain_page(_mfn(this_cpu(stubs.mfn)));
/* Trampoline for SYSCALL entry from 64-bit mode. */
wrmsrl(MSR_LSTAR, stub_va);
#define cpu_has_amd_erratum(nr) \
cpu_has_amd_erratum(&current_cpu_data, AMD_ERRATUM_##nr)
-#define get_stub(stb) ({ \
- BUILD_BUG_ON(STUB_BUF_SIZE / 2 < MAX_INST_LEN + 1); \
- (stb).addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2; \
- ((stb).ptr = map_domain_page(this_cpu(stubs.mfn))) + \
- ((stb).addr & ~PAGE_MASK); \
+#define get_stub(stb) ({ \
+ BUILD_BUG_ON(STUB_BUF_SIZE / 2 < MAX_INST_LEN + 1); \
+ (stb).addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2; \
+ ((stb).ptr = map_domain_page(_mfn(this_cpu(stubs.mfn)))) + \
+ ((stb).addr & ~PAGE_MASK); \
})
#define put_stub(stb) ({ \
if ( (stb).ptr ) \
buf->have_type = 1;
}
- buf->virt = map_domain_page(buf->frame);
+ buf->virt = map_domain_page(_mfn(buf->frame));
rc = GNTST_okay;
out:
}
}
- v = map_domain_page(mfn);
+ v = map_domain_page(_mfn(mfn));
v += cflush->offset;
if ( (cflush->op & GNTTAB_CACHE_INVAL) && (cflush->op & GNTTAB_CACHE_CLEAN) )
kimage_entry_t *entry;
int ret = 0;
- page = map_domain_page(mfn);
+ page = map_domain_page(_mfn(mfn));
/*
* Walk the indirection page list, adding destination pages to the
break;
case IND_INDIRECTION:
unmap_domain_page(page);
- entry = page = map_domain_page(mfn);
+ entry = page = map_domain_page(_mfn(mfn));
continue;
case IND_DONE:
goto done;
* Call unmap_domain_page(ptr) after the loop exits.
*/
#define for_each_kimage_entry(image, ptr, entry) \
- for ( ptr = map_domain_page(image->head >> PAGE_SHIFT); \
+ for ( ptr = map_domain_page(_mfn(paddr_to_pfn(image->head))); \
(entry = *ptr) && !(entry & IND_DONE); \
ptr = (entry & IND_INDIRECTION) ? \
- (unmap_domain_page(ptr), map_domain_page(entry >> PAGE_SHIFT)) \
+ (unmap_domain_page(ptr), map_domain_page(_mfn(paddr_to_pfn(entry)))) \
: ptr + 1 )
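/*
 * Illustrative traversal (a sketch assuming the standard IND_SOURCE entry
 * flag; not part of this change): visit every entry, release source frames,
 * then drop the final mapping as required above.
 *
 *     kimage_entry_t *ptr, entry;
 *
 *     for_each_kimage_entry(image, ptr, entry)
 *         if ( entry & IND_SOURCE )
 *             kimage_free_entry(entry);
 *     unmap_domain_page(ptr);
 */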
static void kimage_free_entry(kimage_entry_t entry)
dchunk = PAGE_SIZE;
schunk = min(dchunk, sbytes);
- dest_va = map_domain_page(dest_mfn);
+ dest_va = map_domain_page(_mfn(dest_mfn));
if ( !dest_va )
return -EINVAL;
int ret = 0;
paddr_t dest = KIMAGE_NO_DEST;
- page = map_domain_page(ind_mfn);
+ page = map_domain_page(_mfn(ind_mfn));
if ( !page )
return -ENOMEM;
break;
case IND_INDIRECTION:
unmap_domain_page(page);
- page = map_domain_page(mfn);
+ page = map_domain_page(_mfn(mfn));
entry = page;
continue;
case IND_DONE:
void clear_domain_page(mfn_t mfn)
{
- void *ptr = map_domain_page(mfn_x(mfn));
+ void *ptr = map_domain_page(mfn);
clear_page(ptr);
unmap_domain_page(ptr);
void copy_domain_page(mfn_t dest, mfn_t source)
{
- const void *src = map_domain_page(mfn_x(source));
- void *dst = map_domain_page(mfn_x(dest));
+ const void *src = map_domain_page(source);
+ void *dst = map_domain_page(dest);
copy_page(dst, src);
unmap_domain_page(dst);
*pcli_mfn = page_to_mfn(page);
*pcli_pfp = page;
- return map_domain_page(*pcli_mfn);
+ return map_domain_page(_mfn(*pcli_mfn));
}
static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
ASSERT(pfp != NULL);
tmem_mfn = page_to_mfn(pfp);
- tmem_va = map_domain_page(tmem_mfn);
+ tmem_va = map_domain_page(_mfn(tmem_mfn));
if ( guest_handle_is_null(clibuf) )
{
cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 0);
return -EFAULT;
}
tmem_mfn = page_to_mfn(pfp);
- tmem_va = map_domain_page(tmem_mfn);
+ tmem_va = map_domain_page(_mfn(tmem_mfn));
if ( cli_va )
{
memcpy(cli_va, tmem_va, PAGE_SIZE);
sizeof(ppr_entry_t), tail);
ASSERT(mfn_valid(mfn));
- log_base = map_domain_page(mfn);
+ log_base = map_domain_page(_mfn(mfn));
log = log_base + tail % (PAGE_SIZE / sizeof(ppr_entry_t));
/* Convert physical device id back into virtual device id */
sizeof(event_entry_t), tail);
ASSERT(mfn_valid(mfn));
- log_base = map_domain_page(mfn);
+ log_base = map_domain_page(_mfn(mfn));
log = log_base + tail % (PAGE_SIZE / sizeof(event_entry_t));
/* re-write physical device id into virtual device id */
gaddr_64 = (gaddr_hi << 32) | (gaddr_lo << 3);
gfn = gaddr_64 >> PAGE_SHIFT;
- vaddr = map_domain_page(mfn_x(get_gfn(d, gfn ,&p2mt)));
+ vaddr = map_domain_page(get_gfn(d, gfn, &p2mt));
put_gfn(d, gfn);
write_u64_atomic((uint64_t *)(vaddr + (gaddr_64 & (PAGE_SIZE-1))),
ASSERT(mfn_valid(dte_mfn));
/* Read guest dte information */
- dte_base = map_domain_page(dte_mfn);
+ dte_base = map_domain_page(_mfn(dte_mfn));
gdte = dte_base + gbdf % (PAGE_SIZE / sizeof(dev_entry_t));
sizeof(cmd_entry_t), head);
ASSERT(mfn_valid(cmd_mfn));
- cmd_base = map_domain_page(cmd_mfn);
+ cmd_base = map_domain_page(_mfn(cmd_mfn));
cmd = cmd_base + head % entries_per_page;
opcode = get_field_from_reg_u32(cmd->data[1],
{
u64 *table, *pte;
- table = map_domain_page(l1_mfn);
+ table = map_domain_page(_mfn(l1_mfn));
pte = table + pfn_to_pde_idx(gfn, IOMMU_PAGING_MODE_LEVEL_1);
*pte = 0;
unmap_domain_page(table);
u32 *pde;
bool_t need_flush = 0;
- table = map_domain_page(pt_mfn);
+ table = map_domain_page(_mfn(pt_mfn));
pde = (u32*)(table + pfn_to_pde_idx(gfn, pde_level));
next_level = merge_level - 1;
/* get pde at merge level */
- table = map_domain_page(pt_mfn);
+ table = map_domain_page(_mfn(pt_mfn));
pde = table + pfn_to_pde_idx(gfn, merge_level);
/* get page table of next level */
ntable_maddr = amd_iommu_get_next_table_from_pte((u32*)pde);
- ntable = map_domain_page(ntable_maddr >> PAGE_SHIFT);
+ ntable = map_domain_page(_mfn(paddr_to_pfn(ntable_maddr)));
/* get the first mfn of next level */
first_mfn = amd_iommu_get_next_table_from_pte((u32*)ntable) >> PAGE_SHIFT;
ASSERT( spin_is_locked(&hd->arch.mapping_lock) && pt_mfn );
- table = map_domain_page(pt_mfn);
+ table = map_domain_page(_mfn(pt_mfn));
pde = table + pfn_to_pde_idx(gfn, merge_level);
/* get first mfn */
return 1;
}
- ntable = map_domain_page(ntable_mfn);
+ ntable = map_domain_page(_mfn(ntable_mfn));
first_mfn = amd_iommu_get_next_table_from_pte((u32*)ntable) >> PAGE_SHIFT;
if ( first_mfn == 0 )
unsigned int next_level = level - 1;
pt_mfn[level] = next_table_mfn;
- next_table_vaddr = map_domain_page(next_table_mfn);
+ next_table_vaddr = map_domain_page(_mfn(next_table_mfn));
pde = next_table_vaddr + pfn_to_pde_idx(pfn, level);
/* Here might be a super page frame */
void *map_vtd_domain_page(u64 maddr)
{
- return map_domain_page(maddr >> PAGE_SHIFT_4K);
+ return map_domain_page(_mfn(paddr_to_pfn(maddr)));
}
void unmap_vtd_domain_page(void *va)
static inline void *
hap_map_domain_page(mfn_t mfn)
{
- return map_domain_page(mfn_x(mfn));
+ return map_domain_page(mfn);
}
static inline void
#define l3e_to_l2e(x) ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
#define l4e_to_l3e(x) ((l3_pgentry_t *)__va(l4e_get_paddr(x)))
-#define map_l1t_from_l2e(x) ((l1_pgentry_t *)map_domain_page(l2e_get_pfn(x)))
-#define map_l2t_from_l3e(x) ((l2_pgentry_t *)map_domain_page(l3e_get_pfn(x)))
-#define map_l3t_from_l4e(x) ((l3_pgentry_t *)map_domain_page(l4e_get_pfn(x)))
+#define map_l1t_from_l2e(x) ((l1_pgentry_t *)map_domain_page(_mfn(l2e_get_pfn(x))))
+#define map_l2t_from_l3e(x) ((l2_pgentry_t *)map_domain_page(_mfn(l3e_get_pfn(x))))
+#define map_l3t_from_l4e(x) ((l3_pgentry_t *)map_domain_page(_mfn(l4e_get_pfn(x))))
/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(a) \
#define __pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
#define __paddr_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT))
+
/* Convert between machine frame numbers and spage-info structures. */
#define __mfn_to_spage(mfn) (spage_table + pfn_to_sdx(mfn))
#define __spage_to_mfn(pg) sdx_to_pfn((unsigned long)((pg) - spage_table))
!= _PAGE_PRESENT )
return NULL;
*gl1mfn = l2e_get_pfn(l2e);
- return (l1_pgentry_t *)map_domain_page(*gl1mfn) + l1_table_offset(addr);
+ return (l1_pgentry_t *)map_domain_page(_mfn(*gl1mfn)) +
+        l1_table_offset(addr);
}
/* Pull down the mapping we got from guest_map_l1e() */
* Map a given page frame, returning the mapped virtual address. The page is
* then accessible within the current VCPU until a corresponding unmap call.
*/
-void *map_domain_page(unsigned long mfn);
+void *map_domain_page(mfn_t mfn);
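/*
 * Minimal usage sketch (illustrative; assumes 'pfn' is a raw frame number
 * already known to be valid):
 *
 *     void *p = map_domain_page(_mfn(pfn));
 *
 *     clear_page(p);
 *     unmap_domain_page(p);
 */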
/*
* Pass a VA within a page previously mapped in the context of the
void *map_domain_page_global(mfn_t mfn);
void unmap_domain_page_global(const void *va);
-#define __map_domain_page(pg) map_domain_page(__page_to_mfn(pg))
+#define __map_domain_page(pg) map_domain_page(_mfn(__page_to_mfn(pg)))
static inline void *__map_domain_page_global(const struct page_info *pg)
{
}
cache->mfn = mfn;
- cache->va = map_domain_page(mfn);
+ cache->va = map_domain_page(_mfn(mfn));
cache->flags = DMCACHE_ENTRY_HELD | DMCACHE_ENTRY_VALID;
done:
#else /* !CONFIG_DOMAIN_PAGE */
-#define map_domain_page(mfn) mfn_to_virt(mfn)
+#define map_domain_page(mfn) mfn_to_virt(mfn_x(mfn))
#define __map_domain_page(pg) page_to_virt(pg)
#define unmap_domain_page(va) ((void)(va))
#define domain_page_map_to_mfn(va) virt_to_mfn((unsigned long)(va))
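/*
 * Without CONFIG_DOMAIN_PAGE all RAM is reachable through the direct map,
 * so "mapping" a frame is only an address calculation: mfn_x() unwraps the
 * typesafe mfn_t for mfn_to_virt(), and unmap_domain_page() is a no-op.
 */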