int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
{
mfn_t r_mfn;
- struct p2m_domain *p2m;
p2m_type_t pt;
/* Always trust dom0's MCE handler will prevent future access */
if ( !is_hvm_domain(d) || !paging_mode_hap(d) )
return -ENOSYS;
- p2m = p2m_get_hostp2m(d);
- ASSERT(p2m);
-
- /* This only happen for PoD memory, which should be handled seperetely */
- if (gfn > p2m->max_mapped_pfn)
- return -EINVAL;
-
- r_mfn = gfn_to_mfn_query(p2m, gfn, &pt);
+ r_mfn = gfn_to_mfn_query(d, gfn, &pt);
if ( p2m_to_mask(pt) & P2M_UNMAP_TYPES)
{
ASSERT(mfn_x(r_mfn) == mfn_x(mfn));
- p2m_change_type(p2m, gfn, pt, p2m_ram_broken);
+ p2m_change_type(d, gfn, pt, p2m_ram_broken);
return 0;
}
return INVALID_MFN;
}
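/*
 * Example only, not part of this patch: a minimal sketch of how a caller
 * (e.g. the MCE handling path) might use the reworked helper above.
 * offline_broken_gfn() and its policy are assumptions; only
 * unmmap_broken_page() itself comes from this code.
 */
static int offline_broken_gfn(struct domain *d, unsigned long gfn, mfn_t bad_mfn)
{
    int rc = unmmap_broken_page(d, bad_mfn, gfn);

    /* -ENOSYS means the domain isn't HVM/HAP, so there is no p2m to fix up. */
    return ( rc == -ENOSYS ) ? 0 : rc;
}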
- mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(dp), gfn, &gfntype));
+ mfn = mfn_x(gfn_to_mfn(dp, gfn, &gfntype));
if ( p2m_is_readonly(gfntype) && toaddr )
{
DBGP2("kdb:p2m_is_readonly: gfntype:%x\n", gfntype);
}
if ( is_hvm_domain(d) )
- {
- p2m_pod_dump_data(p2m_get_hostp2m(d));
- }
+ p2m_pod_dump_data(d);
spin_lock(&d->page_alloc_lock);
page_list_for_each ( page, &d->xenpage_list )
ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
for ( i = 0; i < nr_mfns; i++ )
- set_mmio_p2m_entry(p2m_get_hostp2m(d), gfn+i, _mfn(mfn+i));
+ set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
}
else
{
gfn, mfn, nr_mfns);
for ( i = 0; i < nr_mfns; i++ )
- clear_mmio_p2m_entry(p2m_get_hostp2m(d), gfn+i);
+ clear_mmio_p2m_entry(d, gfn+i);
ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
}
paddr_t value = ram_gpa;
int value_is_ptr = (p_data == NULL);
struct vcpu *curr = current;
- struct p2m_domain *p2m = p2m_get_hostp2m(curr->domain);
ioreq_t *p = get_ioreq(curr);
unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
p2m_type_t p2mt;
int rc;
/* Check for paged out page */
- ram_mfn = gfn_to_mfn_unshare(p2m, ram_gfn, &p2mt);
+ ram_mfn = gfn_to_mfn_unshare(curr->domain, ram_gfn, &p2mt);
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(p2m, ram_gfn);
+ p2m_mem_paging_populate(curr->domain, ram_gfn);
return X86EMUL_RETRY;
}
if ( p2m_is_shared(p2mt) )
unsigned long saddr, daddr, bytes;
paddr_t sgpa, dgpa;
uint32_t pfec = PFEC_page_present;
- struct p2m_domain *p2m = p2m_get_hostp2m(current->domain);
p2m_type_t p2mt;
int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
char *buf;
if ( rc != X86EMUL_OKAY )
return rc;
- (void)gfn_to_mfn(p2m, sgpa >> PAGE_SHIFT, &p2mt);
+ (void)gfn_to_mfn(current->domain, sgpa >> PAGE_SHIFT, &p2mt);
if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
return hvmemul_do_mmio(
sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
- (void)gfn_to_mfn(p2m, dgpa >> PAGE_SHIFT, &p2mt);
+ (void)gfn_to_mfn(current->domain, dgpa >> PAGE_SHIFT, &p2mt);
if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
return hvmemul_do_mmio(
dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);
struct domain *d, struct hvm_ioreq_page *iorp, unsigned long gmfn)
{
struct page_info *page;
- struct p2m_domain *p2m = p2m_get_hostp2m(d);
p2m_type_t p2mt;
unsigned long mfn;
void *va;
- mfn = mfn_x(gfn_to_mfn_unshare(p2m, gmfn, &p2mt));
+ mfn = mfn_x(gfn_to_mfn_unshare(d, gmfn, &p2mt));
if ( !p2m_is_ram(p2mt) )
return -EINVAL;
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(p2m, gmfn);
+ p2m_mem_paging_populate(d, gmfn);
return -ENOENT;
}
if ( p2m_is_shared(p2mt) )
p2m_access_t p2ma;
mfn_t mfn;
struct vcpu *v = current;
- struct p2m_domain *p2m = NULL;
+ struct p2m_domain *p2m;
/* On Nested Virtualization, walk the guest page table.
* If this succeeds, all is fine.
#ifdef __x86_64__
/* Check if the page has been paged out */
if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
- p2m_mem_paging_populate(p2m, gfn);
+ p2m_mem_paging_populate(v->domain, gfn);
/* Mem sharing: unshare the page and try again */
if ( p2mt == p2m_ram_shared )
{
- mem_sharing_unshare_page(p2m, gfn, 0);
+ ASSERT(!p2m_is_nestedp2m(p2m));
+ mem_sharing_unshare_page(p2m->domain, gfn, 0);
return 1;
}
#endif
* page.
*/
paging_mark_dirty(v->domain, mfn_x(mfn));
- p2m_change_type(p2m, gfn, p2m_ram_logdirty, p2m_ram_rw);
+ p2m_change_type(v->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
return 1;
}
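/*
 * Example only, not part of this patch: the common caller pattern after this
 * interface change -- translate by domain, kick the pager on a paged-out
 * entry, and let the caller retry.  example_read_gfn() is a hypothetical
 * name; the calls it makes are the ones reworked in this patch.
 */
static int example_read_gfn(struct domain *d, unsigned long gfn)
{
    p2m_type_t t;
    mfn_t mfn = gfn_to_mfn_unshare(d, gfn, &t);

    if ( p2m_is_paging(t) )
    {
        p2m_mem_paging_populate(d, gfn);   /* ask the pager to bring it back */
        return -ENOENT;                    /* caller retries later */
    }
    if ( !p2m_is_ram(t) || !mfn_valid(mfn) )
        return -EINVAL;
    /* ... mfn is now safe to map and use ... */
    return 0;
}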
{
struct vcpu *v = current;
p2m_type_t p2mt;
- struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
unsigned long gfn, mfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
{
/* The guest CR3 must be pointing to the guest physical. */
gfn = v->arch.hvm_vcpu.guest_cr[3]>>PAGE_SHIFT;
- mfn = mfn_x(gfn_to_mfn(p2m, gfn, &p2mt));
+ mfn = mfn_x(gfn_to_mfn(v->domain, gfn, &p2mt));
if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
!get_page(mfn_to_page(mfn), v->domain))
{
{
/* Shadow-mode CR3 change. Check PDBR and update refcounts. */
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
- mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
- value >> PAGE_SHIFT, &p2mt));
+ mfn = mfn_x(gfn_to_mfn(v->domain, value >> PAGE_SHIFT, &p2mt));
if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
!get_page(mfn_to_page(mfn), v->domain) )
goto bad_cr3;
{
unsigned long mfn;
p2m_type_t p2mt;
- struct p2m_domain *p2m = p2m_get_hostp2m(current->domain);
+ struct domain *d = current->domain;
mfn = mfn_x(writable
- ? gfn_to_mfn_unshare(p2m, gfn, &p2mt)
- : gfn_to_mfn(p2m, gfn, &p2mt));
+ ? gfn_to_mfn_unshare(d, gfn, &p2mt)
+ : gfn_to_mfn(d, gfn, &p2mt));
if ( (p2m_is_shared(p2mt) && writable) || !p2m_is_ram(p2mt) )
return NULL;
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(p2m, gfn);
+ p2m_mem_paging_populate(d, gfn);
return NULL;
}
ASSERT(mfn_valid(mfn));
if ( writable )
- paging_mark_dirty(current->domain, mfn);
+ paging_mark_dirty(d, mfn);
return map_domain_page(mfn);
}
void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec)
{
struct vcpu *curr = current;
- struct p2m_domain *p2m;
unsigned long gfn, mfn;
p2m_type_t p2mt;
char *p;
return HVMCOPY_unhandleable;
#endif
- p2m = p2m_get_hostp2m(curr->domain);
-
while ( todo > 0 )
{
count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
gfn = addr >> PAGE_SHIFT;
}
- mfn = mfn_x(gfn_to_mfn_unshare(p2m, gfn, &p2mt));
+ mfn = mfn_x(gfn_to_mfn_unshare(curr->domain, gfn, &p2mt));
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(p2m, gfn);
+ p2m_mem_paging_populate(curr->domain, gfn);
return HVMCOPY_gfn_paged_out;
}
if ( p2m_is_shared(p2mt) )
{
struct xen_hvm_modified_memory a;
struct domain *d;
- struct p2m_domain *p2m;
unsigned long pfn;
if ( copy_from_guest(&a, arg, 1) )
if ( !paging_mode_log_dirty(d) )
goto param_fail3;
- p2m = p2m_get_hostp2m(d);
for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
{
p2m_type_t t;
- mfn_t mfn = gfn_to_mfn(p2m, pfn, &t);
+ mfn_t mfn = gfn_to_mfn(d, pfn, &t);
if ( p2m_is_paging(t) )
{
- p2m_mem_paging_populate(p2m, pfn);
+ p2m_mem_paging_populate(d, pfn);
rc = -EINVAL;
goto param_fail3;
rc = -EINVAL;
if ( is_hvm_domain(d) )
{
- gfn_to_mfn_unshare(p2m_get_hostp2m(d), a.pfn, &t);
+ gfn_to_mfn_unshare(d, a.pfn, &t);
if ( p2m_is_mmio(t) )
a.mem_type = HVMMEM_mmio_dm;
else if ( p2m_is_readonly(t) )
{
struct xen_hvm_set_mem_type a;
struct domain *d;
- struct p2m_domain *p2m;
unsigned long pfn;
/* Interface types to internal p2m types */
if ( a.hvmmem_type >= ARRAY_SIZE(memtype) )
goto param_fail4;
- p2m = p2m_get_hostp2m(d);
for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
{
p2m_type_t t;
p2m_type_t nt;
mfn_t mfn;
- mfn = gfn_to_mfn_unshare(p2m, pfn, &t);
+ mfn = gfn_to_mfn_unshare(d, pfn, &t);
if ( p2m_is_paging(t) )
{
- p2m_mem_paging_populate(p2m, pfn);
-
+ p2m_mem_paging_populate(d, pfn);
rc = -EINVAL;
goto param_fail4;
}
}
else
{
- nt = p2m_change_type(p2m, pfn, t, memtype[a.hvmmem_type]);
+ nt = p2m_change_type(d, pfn, t, memtype[a.hvmmem_type]);
if ( nt != t )
{
gdprintk(XENLOG_WARNING,
mfn_t mfn;
int success;
- mfn = gfn_to_mfn_unshare(p2m, pfn, &t);
+ mfn = gfn_to_mfn_unshare(d, pfn, &t);
p2m_lock(p2m);
success = p2m->set_entry(p2m, pfn, mfn, 0, t, memaccess[a.hvmmem_access]);
{
struct domain *d = v->domain;
p2m_type_t p2mt;
- gfn_to_mfn(p2m_get_hostp2m(d), paddr_to_pfn(gpaddr), &p2mt);
+ gfn_to_mfn(d, paddr_to_pfn(gpaddr), &p2mt);
if (p2m_is_ram(p2mt))
gdprintk(XENLOG_WARNING,
"Conflict occurs for a given guest l1e flags:%x "
int i;
int sign = p->df ? -1 : 1;
p2m_type_t p2mt;
- struct p2m_domain *p2m = p2m_get_hostp2m(current->domain);
+ struct domain *d = current->domain;
if ( p->data_is_ptr )
{
if ( hvm_copy_to_guest_phys(data, &tmp, p->size) !=
HVMCOPY_okay )
{
- (void)gfn_to_mfn(p2m, data >> PAGE_SHIFT, &p2mt);
+ (void)gfn_to_mfn(d, data >> PAGE_SHIFT, &p2mt);
/*
* The only case we handle is vga_mem <-> vga_mem.
* Anything else disables caching and leaves it to qemu-dm.
if ( hvm_copy_from_guest_phys(&tmp, data, p->size) !=
HVMCOPY_okay )
{
- (void)gfn_to_mfn(p2m, data >> PAGE_SHIFT, &p2mt);
+ (void)gfn_to_mfn(d, data >> PAGE_SHIFT, &p2mt);
if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
return 0;
{
if ( c->cr0 & X86_CR0_PG )
{
- mfn = mfn_x(gfn_to_mfn(p2m, c->cr3 >> PAGE_SHIFT, &p2mt));
+ mfn = mfn_x(gfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT, &p2mt));
if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
{
gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n",
unsigned long gfn = gpa >> PAGE_SHIFT;
mfn_t mfn;
p2m_type_t p2mt;
+ p2m_access_t p2ma;
struct p2m_domain *p2m = NULL;
ret = hvm_hap_nested_page_fault(gpa, 0, ~0ul, 0, 0, 0, 0);
p2m = p2m_get_p2m(v);
_d.gpa = gpa;
_d.qualification = 0;
- _d.mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &_d.p2mt));
+ _d.mfn = mfn_x(gfn_to_mfn_type_p2m(p2m, gfn, &_d.p2mt, &p2ma, p2m_query));
__trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
}
if ( p2m == NULL )
p2m = p2m_get_p2m(v);
/* Everything else is an error. */
- mfn = gfn_to_mfn_guest(p2m, gfn, &p2mt);
+ mfn = gfn_to_mfn_type_p2m(p2m, gfn, &p2mt, &p2ma, p2m_guest);
gdprintk(XENLOG_ERR,
"SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n",
gpa, mfn_x(mfn), p2mt);
{
if ( cr0 & X86_CR0_PG )
{
- mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
- cr3 >> PAGE_SHIFT, &p2mt));
+ mfn = mfn_x(gfn_to_mfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
{
gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%lx\n", cr3);
if ( cr3 & 0x1fUL )
goto crash;
- mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
- cr3 >> PAGE_SHIFT, &p2mt));
+ mfn = mfn_x(gfn_to_mfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
if ( !p2m_is_ram(p2mt) )
goto crash;
if ( apic_va == NULL )
return -ENOMEM;
share_xen_page_with_guest(virt_to_page(apic_va), d, XENSHARE_writable);
- set_mmio_p2m_entry(
- p2m_get_hostp2m(d), paddr_to_pfn(APIC_DEFAULT_PHYS_BASE),
+ set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE),
_mfn(virt_to_mfn(apic_va)));
d->arch.hvm_domain.vmx.apic_access_mfn = virt_to_mfn(apic_va);
unsigned long gla, gfn = gpa >> PAGE_SHIFT;
mfn_t mfn;
p2m_type_t p2mt;
- struct p2m_domain *p2m = p2m_get_hostp2m(current->domain);
+ struct domain *d = current->domain;
if ( tb_init_done )
{
_d.gpa = gpa;
_d.qualification = qualification;
- _d.mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &_d.p2mt));
+ _d.mfn = mfn_x(gfn_to_mfn_query(d, gfn, &_d.p2mt));
__trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
}
return;
/* Everything else is an error. */
- mfn = gfn_to_mfn_guest(p2m, gfn, &p2mt);
+ mfn = gfn_to_mfn_guest(d, gfn, &p2mt);
gdprintk(XENLOG_ERR, "EPT violation %#lx (%c%c%c/%c%c%c), "
"gpa %#"PRIpaddr", mfn %#lx, type %i.\n",
qualification,
(qualification & EPT_EFFECTIVE_EXEC) ? 'x' : '-',
gpa, mfn_x(mfn), p2mt);
- ept_walk_table(current->domain, gfn);
+ ept_walk_table(d, gfn);
if ( qualification & EPT_GLA_VALID )
{
gdprintk(XENLOG_ERR, " --- GLA %#lx\n", gla);
}
- domain_crash(current->domain);
+ domain_crash(d);
}
static void vmx_failed_vmentry(unsigned int exit_reason,
if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
{
/* Translate foreign guest addresses. */
- mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(pg_dom),
- l1e_get_pfn(nl1e), &p2mt));
+ mfn = mfn_x(gfn_to_mfn(pg_dom, l1e_get_pfn(nl1e), &p2mt));
if ( !p2m_is_ram(p2mt) || unlikely(mfn == INVALID_MFN) )
return -EINVAL;
ASSERT((mfn & ~(PADDR_MASK >> PAGE_SHIFT)) == 0);
req.ptr -= cmd;
gmfn = req.ptr >> PAGE_SHIFT;
- mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(pt_owner), gmfn, &p2mt));
+ mfn = mfn_x(gfn_to_mfn(pt_owner, gmfn, &p2mt));
if ( !p2m_is_valid(p2mt) )
mfn = INVALID_MFN;
if ( p2m_is_paged(p2mt) )
{
- p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner), gmfn);
+ p2m_mem_paging_populate(pg_owner, gmfn);
rc = -ENOENT;
break;
{
l1_pgentry_t l1e = l1e_from_intpte(req.val);
p2m_type_t l1e_p2mt;
- gfn_to_mfn(p2m_get_hostp2m(pg_owner),
- l1e_get_pfn(l1e), &l1e_p2mt);
+ gfn_to_mfn(pg_owner, l1e_get_pfn(l1e), &l1e_p2mt);
if ( p2m_is_paged(l1e_p2mt) )
{
- p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
- l1e_get_pfn(l1e));
+ p2m_mem_paging_populate(pg_owner, l1e_get_pfn(l1e));
rc = -ENOENT;
break;
}
/* Unshare the page for RW foreign mappings */
if ( l1e_get_flags(l1e) & _PAGE_RW )
{
- rc = mem_sharing_unshare_page(p2m_get_hostp2m(pg_owner),
+ rc = mem_sharing_unshare_page(pg_owner,
l1e_get_pfn(l1e),
0);
if ( rc )
{
l2_pgentry_t l2e = l2e_from_intpte(req.val);
p2m_type_t l2e_p2mt;
- gfn_to_mfn(p2m_get_hostp2m(pg_owner), l2e_get_pfn(l2e), &l2e_p2mt);
+ gfn_to_mfn(pg_owner, l2e_get_pfn(l2e), &l2e_p2mt);
if ( p2m_is_paged(l2e_p2mt) )
{
- p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
- l2e_get_pfn(l2e));
+ p2m_mem_paging_populate(pg_owner, l2e_get_pfn(l2e));
rc = -ENOENT;
break;
}
{
l3_pgentry_t l3e = l3e_from_intpte(req.val);
p2m_type_t l3e_p2mt;
- gfn_to_mfn(p2m_get_hostp2m(pg_owner), l3e_get_pfn(l3e), &l3e_p2mt);
+ gfn_to_mfn(pg_owner, l3e_get_pfn(l3e), &l3e_p2mt);
if ( p2m_is_paged(l3e_p2mt) )
{
- p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
- l3e_get_pfn(l3e));
+ p2m_mem_paging_populate(pg_owner, l3e_get_pfn(l3e));
rc = -ENOENT;
break;
}
{
l4_pgentry_t l4e = l4e_from_intpte(req.val);
p2m_type_t l4e_p2mt;
- gfn_to_mfn(p2m_get_hostp2m(pg_owner),
- l4e_get_pfn(l4e), &l4e_p2mt);
+ gfn_to_mfn(pg_owner, l4e_get_pfn(l4e), &l4e_p2mt);
if ( p2m_is_paged(l4e_p2mt) )
{
- p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
- l4e_get_pfn(l4e));
+ p2m_mem_paging_populate(pg_owner, l4e_get_pfn(l4e));
rc = -ENOENT;
break;
}
p2mt = p2m_grant_map_ro;
else
p2mt = p2m_grant_map_rw;
- rc = guest_physmap_add_entry(p2m_get_hostp2m(current->domain),
+ rc = guest_physmap_add_entry(current->domain,
addr >> PAGE_SHIFT, frame, 0, p2mt);
if ( rc )
return GNTST_general_error;
if ( new_addr != 0 || (flags & GNTMAP_contains_pte) )
return GNTST_general_error;
- old_mfn = gfn_to_mfn(p2m_get_hostp2m(d), gfn, &type);
+ old_mfn = gfn_to_mfn(d, gfn, &type);
if ( !p2m_is_grant(type) || mfn_x(old_mfn) != frame )
{
gdprintk(XENLOG_WARNING,
{
p2m_type_t p2mt;
- xatp.idx = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(d),
- xatp.idx, &p2mt));
+ xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
/* If the page is still shared, exit early */
if ( p2m_is_shared(p2mt) )
{
p2m_type_t *p2mt,
uint32_t *rc)
{
+ p2m_access_t a;
+
/* Translate the gfn, unsharing if shared */
- *mfn = gfn_to_mfn_unshare(p2m, gfn_x(gfn), p2mt);
+ *mfn = gfn_to_mfn_type_p2m(p2m, gfn_x(gfn), p2mt, &a, p2m_unshare);
if ( p2m_is_paging(*p2mt) )
{
- p2m_mem_paging_populate(p2m, gfn_x(gfn));
-
+ ASSERT(!p2m_is_nestedp2m(p2m));
+ p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
*rc = _PAGE_PAGED;
return NULL;
}
mfn_t top_mfn;
void *top_map;
p2m_type_t p2mt;
+ p2m_access_t p2ma;
walk_t gw;
/* Get the top-level table's MFN */
- top_mfn = gfn_to_mfn_unshare(p2m, cr3 >> PAGE_SHIFT, &p2mt);
+ top_mfn = gfn_to_mfn_type_p2m(p2m, cr3 >> PAGE_SHIFT,
+ &p2mt, &p2ma, p2m_unshare);
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(p2m, cr3 >> PAGE_SHIFT);
+ ASSERT(!p2m_is_nestedp2m(p2m));
+ p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);
pfec[0] = PFEC_page_paged;
return INVALID_GFN;
if ( missing == 0 )
{
gfn_t gfn = guest_l1e_get_gfn(gw.l1e);
- gfn_to_mfn_unshare(p2m, gfn_x(gfn), &p2mt);
+ gfn_to_mfn_type_p2m(p2m, gfn_x(gfn), &p2mt, &p2ma, p2m_unshare);
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(p2m, gfn_x(gfn));
+ ASSERT(!p2m_is_nestedp2m(p2m));
+ p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
pfec[0] = PFEC_page_paged;
return INVALID_GFN;
/* set l1e entries of P2M table to be read-only. */
for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
- p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_rw, p2m_ram_logdirty);
+ p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty);
flush_tlb_mask(d->domain_dirty_cpumask);
return 0;
/* set l1e entries of P2M table with normal mode */
for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
- p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_logdirty, p2m_ram_rw);
+ p2m_change_type(d, i, p2m_ram_logdirty, p2m_ram_rw);
flush_tlb_mask(d->domain_dirty_cpumask);
return 0;
/* set l1e entries of P2M table to be read-only. */
for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
- p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_rw, p2m_ram_logdirty);
+ p2m_change_type(d, i, p2m_ram_rw, p2m_ram_logdirty);
flush_tlb_mask(d->domain_dirty_cpumask);
}
hap_unlock(d);
/* set l1e entries of P2M table to be read-only. */
- p2m_change_entry_type_global(p2m_get_hostp2m(d),
- p2m_ram_rw, p2m_ram_logdirty);
+ p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
flush_tlb_mask(d->domain_dirty_cpumask);
return 0;
}
hap_unlock(d);
/* set l1e entries of P2M table with normal mode */
- p2m_change_entry_type_global(p2m_get_hostp2m(d),
- p2m_ram_logdirty, p2m_ram_rw);
+ p2m_change_entry_type_global(d, p2m_ram_logdirty, p2m_ram_rw);
return 0;
}
static void hap_clean_dirty_bitmap(struct domain *d)
{
/* set l1e entries of P2M table to be read-only. */
- p2m_change_entry_type_global(p2m_get_hostp2m(d),
- p2m_ram_rw, p2m_ram_logdirty);
+ p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
flush_tlb_mask(d->domain_dirty_cpumask);
}
{
mfn_t mfn;
p2m_type_t p2mt;
+ p2m_access_t p2ma;
- /* we use gfn_to_mfn_query() function to walk L0 P2M table */
- mfn = gfn_to_mfn_query(p2m, L1_gpa >> PAGE_SHIFT, &p2mt);
+ /* walk L0 P2M table */
+ mfn = gfn_to_mfn_type_p2m(p2m, L1_gpa >> PAGE_SHIFT, &p2mt, &p2ma, p2m_query);
if ( p2m_is_paging(p2mt) || p2m_is_shared(p2mt) || !p2m_is_ram(p2mt) )
return NESTEDHVM_PAGEFAULT_ERROR;
/* Get MFN of ring page */
guest_get_eff_l1e(v, ring_addr, &l1e);
gfn = l1e_get_pfn(l1e);
- ring_mfn = gfn_to_mfn(p2m_get_hostp2m(dom_mem_event), gfn, &p2mt);
+ ring_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
rc = -EINVAL;
if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) )
/* Get MFN of shared page */
guest_get_eff_l1e(v, shared_addr, &l1e);
gfn = l1e_get_pfn(l1e);
- shared_mfn = gfn_to_mfn(p2m_get_hostp2m(dom_mem_event), gfn, &p2mt);
+ shared_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
rc = -EINVAL;
if ( unlikely(!mfn_valid(mfn_x(shared_mfn))) )
int mem_paging_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
XEN_GUEST_HANDLE(void) u_domctl)
{
- int rc;
- struct p2m_domain *p2m = p2m_get_hostp2m(d);
-
/* Only HAP is supported */
if ( !hap_enabled(d) )
return -ENODEV;
case XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE:
{
unsigned long gfn = mec->gfn;
- rc = p2m_mem_paging_nominate(p2m, gfn);
+ return p2m_mem_paging_nominate(d, gfn);
}
break;
case XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT:
{
unsigned long gfn = mec->gfn;
- rc = p2m_mem_paging_evict(p2m, gfn);
+ return p2m_mem_paging_evict(d, gfn);
}
break;
case XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP:
{
unsigned long gfn = mec->gfn;
- rc = p2m_mem_paging_prep(p2m, gfn);
+ return p2m_mem_paging_prep(d, gfn);
}
break;
case XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME:
{
- p2m_mem_paging_resume(p2m);
- rc = 0;
+ p2m_mem_paging_resume(d);
+ return 0;
}
break;
default:
- rc = -ENOSYS;
+ return -ENOSYS;
break;
}
-
- return rc;
}
list_for_each(le, &e->gfns)
{
struct domain *d;
- struct p2m_domain *p2m;
p2m_type_t t;
mfn_t mfn;
g->domain, g->gfn, mfn_x(e->mfn));
continue;
}
- p2m = p2m_get_hostp2m(d);
- mfn = gfn_to_mfn(p2m, g->gfn, &t);
+ mfn = gfn_to_mfn(d, g->gfn, &t);
if(mfn_x(mfn) != mfn_x(e->mfn))
MEM_SHARING_DEBUG("Incorrect P2M for d=%d, PFN=%lx."
"Expecting MFN=%ld, got %ld\n",
p2m_type_t p2mt;
mfn_t mfn;
- mfn = gfn_to_mfn(p2m_get_hostp2m(d), gfn, &p2mt);
+ mfn = gfn_to_mfn(d, gfn, &p2mt);
printk("Debug for domain=%d, gfn=%lx, ",
d->domain_id,
return mem_sharing_debug_gfn(d, gfn);
}
-int mem_sharing_nominate_page(struct p2m_domain *p2m,
+int mem_sharing_nominate_page(struct domain *d,
unsigned long gfn,
int expected_refcnt,
shr_handle_t *phandle)
shr_handle_t handle;
shr_hash_entry_t *hash_entry;
struct gfn_info *gfn_info;
- struct domain *d = p2m->domain;
*phandle = 0UL;
shr_lock();
- mfn = gfn_to_mfn(p2m, gfn, &p2mt);
+ mfn = gfn_to_mfn(d, gfn, &p2mt);
/* Check if mfn is valid */
ret = -EINVAL;
}
/* Change the p2m type */
- if(p2m_change_type(p2m, gfn, p2mt, p2m_ram_shared) != p2mt)
+ if(p2m_change_type(d, gfn, p2mt, p2m_ram_shared) != p2mt)
{
/* This is unlikely, as the type must have changed since we've checked
* it a few lines above.
list_del(&gfn->list);
d = get_domain_by_id(gfn->domain);
BUG_ON(!d);
- BUG_ON(set_shared_p2m_entry(p2m_get_hostp2m(d), gfn->gfn, se->mfn) == 0);
+ BUG_ON(set_shared_p2m_entry(d, gfn->gfn, se->mfn) == 0);
put_domain(d);
list_add(&gfn->list, &se->gfns);
put_page_and_type(cpage);
return ret;
}
-int mem_sharing_unshare_page(struct p2m_domain *p2m,
+int mem_sharing_unshare_page(struct domain *d,
unsigned long gfn,
uint16_t flags)
{
struct gfn_info *gfn_info = NULL;
shr_handle_t handle;
struct list_head *le;
- struct domain *d = p2m->domain;
mem_sharing_audit();
/* Remove the gfn_info from the list */
shr_lock();
- mfn = gfn_to_mfn(p2m, gfn, &p2mt);
+ mfn = gfn_to_mfn(d, gfn, &p2mt);
/* Has someone already unshared it? */
if (!p2m_is_shared(p2mt)) {
unmap_domain_page(s);
unmap_domain_page(t);
- BUG_ON(set_shared_p2m_entry(p2m, gfn, page_to_mfn(page)) == 0);
+ BUG_ON(set_shared_p2m_entry(d, gfn, page_to_mfn(page)) == 0);
put_page_and_type(old_page);
private_page_found:
else
atomic_dec(&nr_saved_mfns);
- if(p2m_change_type(p2m, gfn, p2m_ram_shared, p2m_ram_rw) !=
+ if(p2m_change_type(d, gfn, p2m_ram_shared, p2m_ram_rw) !=
p2m_ram_shared)
{
printk("Could not change p2m type.\n");
shr_handle_t handle;
if(!mem_sharing_enabled(d))
return -EINVAL;
- rc = mem_sharing_nominate_page(p2m_get_hostp2m(d), gfn, 0, &handle);
+ rc = mem_sharing_nominate_page(d, gfn, 0, &handle);
mec->u.nominate.handle = handle;
mem_sharing_audit();
}
return -EINVAL;
if(mem_sharing_gref_to_gfn(d, gref, &gfn) < 0)
return -EINVAL;
- rc = mem_sharing_nominate_page(p2m_get_hostp2m(d),
- gfn, 3, &handle);
+ rc = mem_sharing_nominate_page(d, gfn, 3, &handle);
mec->u.nominate.handle = handle;
mem_sharing_audit();
}
{
p2m_type_t t;
- gfn_to_mfn_query(p2m, gpfn + i, &t);
+ gfn_to_mfn_query(d, gpfn + i, &t);
if ( t == p2m_populate_on_demand )
pod++;
mfn_t mfn;
p2m_type_t t;
- mfn = gfn_to_mfn_query(p2m, gpfn + i, &t);
+ mfn = gfn_to_mfn_query(d, gpfn + i, &t);
if ( t == p2m_populate_on_demand )
{
set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
return ret;
}
-void
-p2m_pod_dump_data(struct p2m_domain *p2m)
+void p2m_pod_dump_data(struct domain *d)
{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
printk(" PoD entries=%d cachesize=%d\n",
p2m->pod.entry_count, p2m->pod.count);
}
for ( i=0; i<SUPERPAGE_PAGES; i++ )
{
- mfn = gfn_to_mfn_query(p2m, gfn + i, &type);
+ mfn = gfn_to_mfn_query(d, gfn + i, &type);
if ( i == 0 )
{
/* First, get the gfn list, translate to mfns, and map the pages. */
for ( i=0; i<count; i++ )
{
- mfns[i] = gfn_to_mfn_query(p2m, gfns[i], types + i);
+ mfns[i] = gfn_to_mfn_query(d, gfns[i], types + i);
/* If this is ram, and not a pagetable or from the xen heap, and probably not mapped
elsewhere, map it; otherwise, skip. */
if ( p2m_is_ram(types[i])
/* FIXME: Figure out how to avoid superpages */
for ( i=p2m->pod.reclaim_single; i > 0 ; i-- )
{
- gfn_to_mfn_query(p2m, i, &t );
+ gfn_to_mfn_query(p2m->domain, i, &t );
if ( p2m_is_ram(t) )
{
gfns[j] = i;
/* Make sure all gpfns are unused */
for ( i = 0; i < (1UL << order); i++ )
{
- omfn = gfn_to_mfn_query(p2m, gfn + i, &ot);
+ omfn = gfn_to_mfn_query(d, gfn + i, &ot);
if ( p2m_is_ram(ot) )
{
printk("%s: gfn_to_mfn returned type %d!\n",
return mfn;
}
-
static mfn_t
-p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t, p2m_access_t *a,
- p2m_query_t q)
+p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn,
+ p2m_type_t *t, p2m_access_t *a, p2m_query_t q)
{
mfn_t mfn;
paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
/* Not implemented except with EPT */
*a = p2m_access_rwx;
- mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
-
if ( gfn > p2m->max_mapped_pfn )
/* This pfn is higher than the highest the p2m map currently holds */
return _mfn(INVALID_MFN);
if ( p2m == p2m_get_hostp2m(current->domain) )
return p2m_gfn_to_mfn_current(p2m, gfn, t, a, q);
+ mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
+
#if CONFIG_PAGING_LEVELS >= 4
{
l4_pgentry_t *l4e = map_domain_page(mfn_x(mfn));
if ( test_linear && (gfn <= p2m->max_mapped_pfn) )
{
- lp2mfn = mfn_x(gfn_to_mfn_query(p2m, gfn, &type));
+ lp2mfn = mfn_x(gfn_to_mfn_query(p2m->domain, gfn, &type));
if ( lp2mfn != mfn_x(p2mfn) )
{
P2M_PRINTK("linear mismatch gfn %#lx -> mfn %#lx "
return p2m_init_nestedp2m(d);
}
-void p2m_change_entry_type_global(struct p2m_domain *p2m,
+void p2m_change_entry_type_global(struct domain *d,
p2m_type_t ot, p2m_type_t nt)
{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
p2m_lock(p2m);
p2m->change_entry_type_global(p2m, ot, nt);
p2m_unlock(p2m);
{
mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query);
if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
- BUG_ON(mem_sharing_unshare_page(p2m, gfn, MEM_SHARING_DESTROY_GFN));
+ {
+ ASSERT(!p2m_is_nestedp2m(p2m));
+ BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN));
+ }
+
}
#endif
}
void
-guest_physmap_remove_entry(struct p2m_domain *p2m, unsigned long gfn,
+guest_physmap_remove_page(struct domain *d, unsigned long gfn,
unsigned long mfn, unsigned int page_order)
{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
p2m_lock(p2m);
audit_p2m(p2m, 1);
p2m_remove_page(p2m, gfn, mfn, page_order);
}
int
-guest_physmap_add_entry(struct p2m_domain *p2m, unsigned long gfn,
+guest_physmap_add_entry(struct domain *d, unsigned long gfn,
unsigned long mfn, unsigned int page_order,
p2m_type_t t)
{
- struct domain *d = p2m->domain;
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
unsigned long i, ogfn;
p2m_type_t ot;
mfn_t omfn;
/* First, remove m->p mappings for existing p->m mappings */
for ( i = 0; i < (1UL << page_order); i++ )
{
- omfn = gfn_to_mfn_query(p2m, gfn + i, &ot);
+ omfn = gfn_to_mfn_query(d, gfn + i, &ot);
if ( p2m_is_grant(ot) )
{
/* Really shouldn't be unmapping grant maps this way */
* address */
P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
mfn + i, ogfn, gfn + i);
- omfn = gfn_to_mfn_query(p2m, ogfn, &ot);
+ omfn = gfn_to_mfn_query(d, ogfn, &ot);
if ( p2m_is_ram(ot) )
{
ASSERT(mfn_valid(omfn));
/* Modify the p2m type of a single gfn from ot to nt, returning the
* entry's previous type. Resets the access permissions. */
-p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn,
+p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
p2m_type_t ot, p2m_type_t nt)
{
p2m_type_t pt;
mfn_t mfn;
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
p2m_lock(p2m);
- mfn = gfn_to_mfn_query(p2m, gfn, &pt);
+ mfn = gfn_to_mfn_query(d, gfn, &pt);
if ( pt == ot )
set_p2m_entry(p2m, gfn, mfn, 0, nt, p2m->default_access);
}
int
-set_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn)
+set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
{
int rc = 0;
p2m_type_t ot;
mfn_t omfn;
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
- if ( !paging_mode_translate(p2m->domain) )
+ if ( !paging_mode_translate(d) )
return 0;
- omfn = gfn_to_mfn_query(p2m, gfn, &ot);
+ omfn = gfn_to_mfn_query(d, gfn, &ot);
if ( p2m_is_grant(ot) )
{
- domain_crash(p2m->domain);
+ domain_crash(d);
return 0;
}
else if ( p2m_is_ram(ot) )
if ( 0 == rc )
gdprintk(XENLOG_ERR,
"set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
- mfn_x(gfn_to_mfn(p2m, gfn, &ot)));
+ mfn_x(gfn_to_mfn(d, gfn, &ot)));
return rc;
}
int
-clear_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn)
+clear_mmio_p2m_entry(struct domain *d, unsigned long gfn)
{
int rc = 0;
mfn_t mfn;
p2m_type_t t;
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
- if ( !paging_mode_translate(p2m->domain) )
+ if ( !paging_mode_translate(d) )
return 0;
- mfn = gfn_to_mfn(p2m, gfn, &t);
+ mfn = gfn_to_mfn(d, gfn, &t);
/* Do not use mfn_valid() here as it will usually fail for MMIO pages. */
if ( (INVALID_MFN == mfn_x(mfn)) || (t != p2m_mmio_direct) )
}
int
-set_shared_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn)
+set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
int rc = 0;
int need_lock = !p2m_locked_by_me(p2m);
p2m_type_t ot;
if ( !paging_mode_translate(p2m->domain) )
return 0;
- omfn = gfn_to_mfn_query(p2m, gfn, &ot);
+ omfn = gfn_to_mfn_query(p2m->domain, gfn, &ot);
/* At the moment we only allow p2m change if gfn has already been made
* sharable first */
ASSERT(p2m_is_shared(ot));
}
#ifdef __x86_64__
-int p2m_mem_paging_nominate(struct p2m_domain *p2m, unsigned long gfn)
+int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn)
{
struct page_info *page;
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
p2m_type_t p2mt;
mfn_t mfn;
int ret;
- mfn = gfn_to_mfn(p2m, gfn, &p2mt);
+ mfn = gfn_to_mfn(p2m->domain, gfn, &p2mt);
/* Check if mfn is valid */
ret = -EINVAL;
return ret;
}
-int p2m_mem_paging_evict(struct p2m_domain *p2m, unsigned long gfn)
+int p2m_mem_paging_evict(struct domain *d, unsigned long gfn)
{
struct page_info *page;
p2m_type_t p2mt;
mfn_t mfn;
- struct domain *d = p2m->domain;
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
/* Get mfn */
- mfn = gfn_to_mfn(p2m, gfn, &p2mt);
+ mfn = gfn_to_mfn(d, gfn, &p2mt);
if ( unlikely(!mfn_valid(mfn)) )
return -EINVAL;
return 0;
}
-void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn)
+void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn)
{
struct vcpu *v = current;
mem_event_request_t req;
- struct domain *d = p2m->domain;
/* Check that there's space on the ring for this request */
if ( mem_event_check_ring(d) == 0)
}
}
-void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
+void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
{
struct vcpu *v = current;
mem_event_request_t req;
p2m_type_t p2mt;
- struct domain *d = p2m->domain;
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
/* Check that there's space on the ring for this request */
if ( mem_event_check_ring(d) )
/* Fix p2m mapping */
/* XXX: It seems inefficient to have this here, as it's only needed
* in one case (ept guest accessing paging out page) */
- gfn_to_mfn(p2m, gfn, &p2mt);
+ gfn_to_mfn(d, gfn, &p2mt);
if ( p2mt == p2m_ram_paged )
{
p2m_lock(p2m);
mem_event_put_request(d, &req);
}
-int p2m_mem_paging_prep(struct p2m_domain *p2m, unsigned long gfn)
+int p2m_mem_paging_prep(struct domain *d, unsigned long gfn)
{
struct page_info *page;
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
/* Get a free page */
page = alloc_domheap_page(p2m->domain, 0);
return 0;
}
-void p2m_mem_paging_resume(struct p2m_domain *p2m)
+void p2m_mem_paging_resume(struct domain *d)
{
- struct domain *d = p2m->domain;
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
mem_event_response_t rsp;
p2m_type_t p2mt;
mfn_t mfn;
/* Fix p2m entry if the page was not dropped */
if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
{
- mfn = gfn_to_mfn(p2m, rsp.gfn, &p2mt);
+ mfn = gfn_to_mfn(d, rsp.gfn, &p2mt);
p2m_lock(p2m);
set_p2m_entry(p2m, rsp.gfn, mfn, 0, p2m_ram_rw, p2m->default_access);
set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
/* Iterate over VRAM to track dirty bits. */
for ( i = 0; i < nr; i++ ) {
- mfn_t mfn = gfn_to_mfn(p2m, begin_pfn + i, &t);
+ mfn_t mfn = gfn_to_mfn(d, begin_pfn + i, &t);
struct page_info *page;
int dirty = 0;
paddr_t sl1ma = dirty_vram->sl1ma[i];
/* was clean for more than two seconds, try to disable guest
* write access */
for ( i = begin_pfn; i < end_pfn; i++ ) {
- mfn_t mfn = gfn_to_mfn(p2m, i, &t);
+ mfn_t mfn = gfn_to_mfn(d, i, &t);
if (mfn_x(mfn) != INVALID_MFN)
flush_tlb |= sh_remove_write_access(d->vcpu[0], mfn, 1, 0);
}
shadow_l4e_t *sl4p = se;
mfn_t sl3mfn = _mfn(INVALID_MFN);
struct domain *d = v->domain;
- struct p2m_domain *p2m = p2m_get_hostp2m(d);
p2m_type_t p2mt;
int result = 0;
if ( guest_l4e_get_flags(new_gl4e) & _PAGE_PRESENT )
{
gfn_t gl3gfn = guest_l4e_get_gfn(new_gl4e);
- mfn_t gl3mfn = gfn_to_mfn_query(p2m, gl3gfn, &p2mt);
+ mfn_t gl3mfn = gfn_to_mfn_query(d, gl3gfn, &p2mt);
if ( p2m_is_ram(p2mt) )
sl3mfn = get_shadow_status(v, gl3mfn, SH_type_l3_shadow);
else if ( p2mt != p2m_populate_on_demand )
mfn_t sl2mfn = _mfn(INVALID_MFN);
p2m_type_t p2mt;
int result = 0;
- struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
perfc_incr(shadow_validate_gl3e_calls);
if ( guest_l3e_get_flags(new_gl3e) & _PAGE_PRESENT )
{
gfn_t gl2gfn = guest_l3e_get_gfn(new_gl3e);
- mfn_t gl2mfn = gfn_to_mfn_query(p2m, gl2gfn, &p2mt);
+ mfn_t gl2mfn = gfn_to_mfn_query(v->domain, gl2gfn, &p2mt);
if ( p2m_is_ram(p2mt) )
sl2mfn = get_shadow_status(v, gl2mfn, SH_type_l2_shadow);
else if ( p2mt != p2m_populate_on_demand )
guest_l2e_t new_gl2e = *(guest_l2e_t *)new_ge;
shadow_l2e_t *sl2p = se;
mfn_t sl1mfn = _mfn(INVALID_MFN);
- struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
p2m_type_t p2mt;
int result = 0;
}
else
{
- mfn_t gl1mfn = gfn_to_mfn_query(p2m, gl1gfn, &p2mt);
+ mfn_t gl1mfn = gfn_to_mfn_query(v->domain, gl1gfn, &p2mt);
if ( p2m_is_ram(p2mt) )
sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
else if ( p2mt != p2m_populate_on_demand )
shadow_l1e_t *sl1p = se;
gfn_t gfn;
mfn_t gmfn;
- struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
p2m_type_t p2mt;
int result = 0;
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
perfc_incr(shadow_validate_gl1e_calls);
gfn = guest_l1e_get_gfn(new_gl1e);
- gmfn = gfn_to_mfn_query(p2m, gfn, &p2mt);
+ gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
l1e_propagate_from_guest(v, new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt);
result |= shadow_set_l1e(v, sl1p, new_sl1e, p2mt, sl1mfn);
shadow_l1e_t nsl1e;
gfn = guest_l1e_get_gfn(gl1e);
- gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), gfn, &p2mt);
+ gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
l1e_propagate_from_guest(v, gl1e, gmfn, &nsl1e, ft_prefetch, p2mt);
rc |= shadow_set_l1e(v, sl1p, nsl1e, p2mt, sl1mfn);
/* Look at the gfn that the l1e is pointing at */
gfn = guest_l1e_get_gfn(gl1e);
- gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), gfn, &p2mt);
+ gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
/* Propagate the entry. */
l1e_propagate_from_guest(v, gl1e, gmfn, &sl1e, ft_prefetch, p2mt);
/* What mfn is the guest trying to access? */
gfn = guest_l1e_get_gfn(gw.l1e);
- gmfn = gfn_to_mfn_guest(p2m_get_hostp2m(d), gfn, &p2mt);
+ gmfn = gfn_to_mfn_guest(d, gfn, &p2mt);
if ( shadow_mode_refcounts(d) &&
((!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt)) ||
if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
{
gl2gfn = guest_l3e_get_gfn(gl3e[i]);
- gl2mfn = gfn_to_mfn_query(p2m_get_hostp2m(d), gl2gfn, &p2mt);
+ gl2mfn = gfn_to_mfn_query(d, gl2gfn, &p2mt);
if ( p2m_is_ram(p2mt) )
flush |= sh_remove_write_access(v, gl2mfn, 2, 0);
}
if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
{
gl2gfn = guest_l3e_get_gfn(gl3e[i]);
- gl2mfn = gfn_to_mfn_query(p2m_get_hostp2m(d), gl2gfn, &p2mt);
+ gl2mfn = gfn_to_mfn_query(d, gl2gfn, &p2mt);
if ( p2m_is_ram(p2mt) )
sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3)
? SH_type_l2h_shadow
if ( gcr3 == gpa )
fast_path = 1;
- gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), _gfn(gpa >> PAGE_SHIFT), &p2mt);
+ gmfn = gfn_to_mfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
if ( !mfn_valid(gmfn) || !p2m_is_ram(p2mt) )
{
printk(XENLOG_DEBUG "sh_pagetable_dying: gpa not valid %"PRIpaddr"\n",
{
/* retrieving the l2s */
gl2a = guest_l3e_get_paddr(gl3e[i]);
- gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), _gfn(gl2a >> PAGE_SHIFT), &p2mt);
+ gmfn = gfn_to_mfn_query(v->domain, _gfn(gl2a >> PAGE_SHIFT), &p2mt);
smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l2_pae_shadow);
}
shadow_lock(v->domain);
- gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), _gfn(gpa >> PAGE_SHIFT), &p2mt);
+ gmfn = gfn_to_mfn_query(v->domain, _gfn(gpa >> PAGE_SHIFT), &p2mt);
#if GUEST_PAGING_LEVELS == 2
smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l2_32_shadow);
#else
mfn_t mfn;
p2m_type_t p2mt;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
- struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
/* Translate the VA to a GFN */
- gfn = sh_gva_to_gfn(v, p2m, vaddr, &pfec);
+ gfn = sh_gva_to_gfn(v, NULL, vaddr, &pfec);
if ( gfn == INVALID_GFN )
{
if ( is_hvm_vcpu(v) )
/* Translate the GFN to an MFN */
/* PoD: query only if shadow lock is held (to avoid deadlock) */
if ( shadow_locked_by_me(v->domain) )
- mfn = gfn_to_mfn_query(p2m, _gfn(gfn), &p2mt);
+ mfn = gfn_to_mfn_query(v->domain, _gfn(gfn), &p2mt);
else
- mfn = gfn_to_mfn(p2m, _gfn(gfn), &p2mt);
+ mfn = gfn_to_mfn(v->domain, _gfn(gfn), &p2mt);
if ( p2m_is_readonly(p2mt) )
return _mfn(READONLY_GFN);
{
gfn = guest_l1e_get_gfn(*gl1e);
mfn = shadow_l1e_get_mfn(*sl1e);
- gmfn = gfn_to_mfn_query(p2m_get_hostp2m(v->domain), gfn, &p2mt);
+ gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
if ( !p2m_is_grant(p2mt) && mfn_x(gmfn) != mfn_x(mfn) )
AUDIT_FAIL(1, "bad translation: gfn %" SH_PRI_gfn
" --> %" PRI_mfn " != mfn %" PRI_mfn,
shadow_l2e_t *sl2e;
mfn_t mfn, gmfn, gl2mfn;
gfn_t gfn;
- struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
p2m_type_t p2mt;
char *s;
int done = 0;
mfn = shadow_l2e_get_mfn(*sl2e);
gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE)
? get_fl1_shadow_status(v, gfn)
- : get_shadow_status(v, gfn_to_mfn_query(p2m, gfn, &p2mt),
+ : get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt),
SH_type_l1_shadow);
if ( mfn_x(gmfn) != mfn_x(mfn) )
AUDIT_FAIL(2, "bad translation: gfn %" SH_PRI_gfn
" --> %" PRI_mfn " != mfn %" PRI_mfn,
gfn_x(gfn),
(guest_l2e_get_flags(*gl2e) & _PAGE_PSE) ? 0
- : mfn_x(gfn_to_mfn_query(p2m,
+ : mfn_x(gfn_to_mfn_query(v->domain,
gfn, &p2mt)), mfn_x(gmfn), mfn_x(mfn));
}
});
{
gfn = guest_l3e_get_gfn(*gl3e);
mfn = shadow_l3e_get_mfn(*sl3e);
- gmfn = get_shadow_status(v, gfn_to_mfn_query(p2m_get_hostp2m(v->domain), gfn, &p2mt),
+ gmfn = get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt),
((GUEST_PAGING_LEVELS == 3 ||
is_pv_32on64_vcpu(v))
&& !shadow_mode_external(v->domain)
{
gfn = guest_l4e_get_gfn(*gl4e);
mfn = shadow_l4e_get_mfn(*sl4e);
- gmfn = get_shadow_status(v, gfn_to_mfn_query(p2m_get_hostp2m(v->domain),
+ gmfn = get_shadow_status(v, gfn_to_mfn_query(v->domain,
gfn, &p2mt),
SH_type_l3_shadow);
if ( mfn_x(gmfn) != mfn_x(mfn) )
/* Override gfn_to_mfn to work with gfn_t */
#undef gfn_to_mfn_query
-#define gfn_to_mfn_query(d, g, t) _gfn_to_mfn_type((d), gfn_x(g), (t), p2m_query)
+#define gfn_to_mfn_query(d, g, t) gfn_to_mfn_type((d), gfn_x(g), (t), p2m_query)
#undef gfn_to_mfn_guest
-#define gfn_to_mfn_guest(d, g, t) _gfn_to_mfn_type((d), gfn_x(g), (t), p2m_guest)
+#define gfn_to_mfn_guest(d, g, t) gfn_to_mfn_type((d), gfn_x(g), (t), p2m_guest)
/* The shadow types needed for the various levels. */
WARN();
if ( dev->domain )
- p2m_change_entry_type_global(p2m_get_hostp2m(dev->domain),
+ p2m_change_entry_type_global(dev->domain,
p2m_mmio_direct, p2m_mmio_direct);
if ( !dev->domain || !paging_mode_translate(dev->domain) )
{
#define gfn_to_mfn_private(_d, _gfn) ({ \
p2m_type_t __p2mt; \
unsigned long __x; \
- __x = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(_d), _gfn, &__p2mt)); \
+ __x = mfn_x(gfn_to_mfn_unshare((_d), (_gfn), &__p2mt)); \
BUG_ON(p2m_is_shared(__p2mt)); /* XXX fixme */ \
if ( !p2m_is_valid(__p2mt) ) \
__x = INVALID_MFN; \
{
int rc = GNTST_okay;
#if defined(P2M_PAGED_TYPES) || defined(P2M_SHARED_TYPES)
- struct p2m_domain *p2m;
p2m_type_t p2mt;
mfn_t mfn;
- p2m = p2m_get_hostp2m(rd);
if ( readonly )
- mfn = gfn_to_mfn(p2m, gfn, &p2mt);
+ mfn = gfn_to_mfn(rd, gfn, &p2mt);
else
{
- mfn = gfn_to_mfn_unshare(p2m, gfn, &p2mt);
+ mfn = gfn_to_mfn_unshare(rd, gfn, &p2mt);
BUG_ON(p2m_is_shared(p2mt));
/* XXX Here, and above in gfn_to_mfn_private, need to handle
* XXX failure to unshare. */
*frame = mfn_x(mfn);
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(p2m, gfn);
+ p2m_mem_paging_populate(rd, gfn);
rc = GNTST_eagain;
}
} else {
unsigned long mfn;
#ifdef CONFIG_X86
- mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(d), gmfn, &p2mt));
+ mfn = mfn_x(gfn_to_mfn(d, gmfn, &p2mt));
if ( unlikely(p2m_is_paging(p2mt)) )
{
guest_physmap_remove_page(d, gmfn, mfn, 0);
- p2m_mem_paging_drop_page(p2m_get_hostp2m(d), gmfn);
+ p2m_mem_paging_drop_page(d, gmfn);
return 1;
}
#else
p2m_type_t p2mt;
/* Shared pages cannot be exchanged */
- mfn = mfn_x(gfn_to_mfn_unshare(p2m_get_hostp2m(d), gmfn + k, &p2mt));
+ mfn = mfn_x(gfn_to_mfn_unshare(d, gmfn + k, &p2mt));
if ( p2m_is_shared(p2mt) )
{
rc = -ENOMEM;
struct page_info *page;
int ret;
- cli_mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(current->domain), cmfn, &t));
+ cli_mfn = mfn_x(gfn_to_mfn(current->domain, cmfn, &t));
if ( t != p2m_ram_rw || !mfn_valid(cli_mfn) )
return NULL;
page = mfn_to_page(cli_mfn);
/* Override gfn_to_mfn to work with gfn_t */
#undef gfn_to_mfn
-#define gfn_to_mfn(d, g, t) _gfn_to_mfn_type((d), gfn_x(g), (t), p2m_alloc)
+#define gfn_to_mfn(d, g, t) gfn_to_mfn_type((d), gfn_x(g), (t), p2m_alloc)
/* Types of the guest's page tables and access functions for them */
typedef uint64_t shr_handle_t;
unsigned int mem_sharing_get_nr_saved_mfns(void);
-int mem_sharing_nominate_page(struct p2m_domain *p2m,
+int mem_sharing_nominate_page(struct domain *d,
unsigned long gfn,
int expected_refcnt,
shr_handle_t *phandle);
#define MEM_SHARING_DESTROY_GFN (1<<1)
-int mem_sharing_unshare_page(struct p2m_domain *p2m,
+int mem_sharing_unshare_page(struct domain *d,
unsigned long gfn,
uint16_t flags);
int mem_sharing_sharing_resume(struct domain *d);
-int mem_sharing_cache_resize(struct p2m_domain *p2m, int new_size);
int mem_sharing_domctl(struct domain *d,
xen_domctl_mem_sharing_op_t *mec);
void mem_sharing_init(void);
} while (0)
-/* Read P2M table, mapping pages as we go.
- * Do not populate PoD pages. */
+/* Read a particular P2M table, mapping pages as we go. Most callers
+ * should _not_ call this directly; use the other gfn_to_mfn_* functions
+ * below unless you know you want to walk a p2m that isn't a domain's
+ * main one. */
static inline mfn_t
gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
p2m_type_t *t, p2m_access_t *a, p2m_query_t q)
#ifdef __x86_64__
if ( q == p2m_unshare && p2m_is_shared(*t) )
{
- mem_sharing_unshare_page(p2m, gfn, 0);
+ ASSERT(!p2m_is_nestedp2m(p2m));
+ mem_sharing_unshare_page(p2m->domain, gfn, 0);
mfn = p2m->get_entry(p2m, gfn, t, a, q);
}
#endif
/* General conversion function from gfn to mfn */
-static inline mfn_t _gfn_to_mfn_type(struct p2m_domain *p2m,
- unsigned long gfn, p2m_type_t *t,
- p2m_query_t q)
+static inline mfn_t gfn_to_mfn_type(struct domain *d,
+ unsigned long gfn, p2m_type_t *t,
+ p2m_query_t q)
{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
p2m_access_t a;
if ( !p2m || !paging_mode_translate(p2m->domain) )
return gfn_to_mfn_type_p2m(p2m, gfn, t, &a, q);
}
-#define gfn_to_mfn(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_alloc)
-#define gfn_to_mfn_query(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_query)
-#define gfn_to_mfn_guest(p2m, g, t) _gfn_to_mfn_type((p2m), (g), (t), p2m_guest)
-#define gfn_to_mfn_unshare(p, g, t) _gfn_to_mfn_type((p), (g), (t), p2m_unshare)
+#define gfn_to_mfn(d, g, t) gfn_to_mfn_type((d), (g), (t), p2m_alloc)
+#define gfn_to_mfn_query(d, g, t) gfn_to_mfn_type((d), (g), (t), p2m_query)
+#define gfn_to_mfn_guest(d, g, t) gfn_to_mfn_type((d), (g), (t), p2m_guest)
+#define gfn_to_mfn_unshare(d, g, t) gfn_to_mfn_type((d), (g), (t), p2m_unshare)
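/*
 * Illustrative only, not part of this patch: with the wrappers above,
 * ordinary callers name the domain and a query type.  example_lookup() is a
 * hypothetical helper showing that, for a translated guest,
 * gfn_to_mfn_query(d, ...) amounts to an explicit host-p2m call.
 */
static inline mfn_t example_lookup(struct domain *d, unsigned long gfn,
                                   p2m_type_t *t)
{
    p2m_access_t a;

    /* Roughly what gfn_to_mfn_query(d, gfn, t) expands to: */
    return gfn_to_mfn_type_p2m(p2m_get_hostp2m(d), gfn, t, &a, p2m_query);
}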
/* Compatibility function exporting the old untyped interface */
static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
{
mfn_t mfn;
p2m_type_t t;
- mfn = gfn_to_mfn(d->arch.p2m, gpfn, &t);
+ mfn = gfn_to_mfn(d, gpfn, &t);
if ( p2m_is_valid(t) )
return mfn_x(mfn);
return INVALID_MFN;
void p2m_final_teardown(struct domain *d);
/* Add a page to a domain's p2m table */
-int guest_physmap_add_entry(struct p2m_domain *p2m, unsigned long gfn,
+int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
unsigned long mfn, unsigned int page_order,
p2m_type_t t);
-/* Remove a page from a domain's p2m table */
-void guest_physmap_remove_entry(struct p2m_domain *p2m, unsigned long gfn,
- unsigned long mfn, unsigned int page_order);
-
-/* Set a p2m range as populate-on-demand */
-int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
- unsigned int order);
-
/* Untyped version for RAM only, for compatibility */
static inline int guest_physmap_add_page(struct domain *d,
unsigned long gfn,
unsigned long mfn,
unsigned int page_order)
{
- return guest_physmap_add_entry(d->arch.p2m, gfn, mfn, page_order, p2m_ram_rw);
+ return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
}
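/*
 * Example only, not part of this patch: with the domain-based interface a
 * grant-style mapping (cf. the create_grant_p2m_mapping hunk earlier)
 * reduces to a single call.  example_map_grant() is a hypothetical wrapper.
 */
static inline int example_map_grant(struct domain *d, unsigned long gpaddr,
                                    unsigned long frame, int readonly)
{
    p2m_type_t t = readonly ? p2m_grant_map_ro : p2m_grant_map_rw;

    return guest_physmap_add_entry(d, gpaddr >> PAGE_SHIFT, frame, 0, t);
}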
/* Remove a page from a domain's p2m table */
-static inline void guest_physmap_remove_page(struct domain *d,
+void guest_physmap_remove_page(struct domain *d,
unsigned long gfn,
- unsigned long mfn, unsigned int page_order)
-{
- guest_physmap_remove_entry(d->arch.p2m, gfn, mfn, page_order);
-}
+ unsigned long mfn, unsigned int page_order);
+
+/* Set a p2m range as populate-on-demand */
+int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
+ unsigned int order);
/* Change types across all p2m entries in a domain */
-void p2m_change_entry_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt);
+void p2m_change_entry_type_global(struct domain *d,
+ p2m_type_t ot, p2m_type_t nt);
/* Compare-exchange the type of a single p2m entry */
-p2m_type_t p2m_change_type(struct p2m_domain *p2m, unsigned long gfn,
+p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
p2m_type_t ot, p2m_type_t nt);
/* Set mmio addresses in the p2m table (for pass-through) */
-int set_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn);
-int clear_mmio_p2m_entry(struct p2m_domain *p2m, unsigned long gfn);
+int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
+int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
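/*
 * Example only, mirroring the domctl hunk near the top of this patch:
 * plumbing a contiguous machine-frame range into a guest's p2m for
 * pass-through.  example_map_mmio_range() is a hypothetical helper and
 * omits the iomem_permit_access() call and error handling shown above.
 */
static inline void example_map_mmio_range(struct domain *d, unsigned long gfn,
                                          unsigned long mfn,
                                          unsigned long nr_mfns)
{
    unsigned long i;

    for ( i = 0; i < nr_mfns; i++ )
        set_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i));
}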
/*
*/
/* Dump PoD information about the domain */
-void p2m_pod_dump_data(struct p2m_domain *p2m);
+void p2m_pod_dump_data(struct domain *d);
/* Move all pages from the populate-on-demand cache to the domain page_list
* (usually in preparation for domain destruction) */
xen_pfn_t gpfn,
unsigned int order);
-/* Called by p2m code when demand-populating a PoD page */
-int
-p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
- unsigned int order,
- p2m_query_t q);
-
/* Scan pod cache when offline/broken page triggered */
int
p2m_pod_offline_or_broken_hit(struct page_info *p);
void
p2m_pod_offline_or_broken_replace(struct page_info *p);
+
/*
* Paging to disk and page-sharing
*/
#ifdef __x86_64__
/* Modify p2m table for shared gfn */
-int set_shared_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn);
+int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
/* Check if a nominated gfn is valid to be paged out */
-int p2m_mem_paging_nominate(struct p2m_domain *p2m, unsigned long gfn);
+int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn);
/* Evict a frame */
-int p2m_mem_paging_evict(struct p2m_domain *p2m, unsigned long gfn);
+int p2m_mem_paging_evict(struct domain *d, unsigned long gfn);
/* Tell xenpaging to drop a paged out frame */
-void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn);
+void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn);
/* Start populating a paged out frame */
-void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn);
+void p2m_mem_paging_populate(struct domain *d, unsigned long gfn);
/* Prepare the p2m for paging a frame in */
-int p2m_mem_paging_prep(struct p2m_domain *p2m, unsigned long gfn);
+int p2m_mem_paging_prep(struct domain *d, unsigned long gfn);
/* Resume normal operation (in case a domain was paused) */
-void p2m_mem_paging_resume(struct p2m_domain *p2m);
+void p2m_mem_paging_resume(struct domain *d);
#else
-static inline void p2m_mem_paging_drop_page(struct p2m_domain *p2m, unsigned long gfn)
+static inline void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn)
{ }
-static inline void p2m_mem_paging_populate(struct p2m_domain *p2m, unsigned long gfn)
+static inline void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
{ }
#endif
{ }
#endif
+/*
+ * Internal functions, only called by other p2m code
+ */
+
struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg);
#define P2M_DEBUG(_f, _a...) do { (void)(_f); } while(0)
#endif
+/* Called by p2m code when demand-populating a PoD page */
+int
+p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
+ unsigned int order,
+ p2m_query_t q);
/*
* Functions specific to the p2m-pt implementation
}
/*
- * Nested p2m: shadow p2m tables used for nexted HVM virtualization
+ * Nested p2m: shadow p2m tables used for nested HVM virtualization
*/
/* Flushes specified p2m table */