Move phys_table from struct domain to struct p2m_domain, and introduce the p2m_get_hostp2m() and p2m_get_pagetable() accessors through which callers now reach it.
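
The accessor-based pattern, sketched here for illustration only (the
hunks below are authoritative; `m` is just an example local):

    /* before */
    mfn_t m = pagetable_get_mfn(d->arch.phys_table);

    /* after */
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    mfn_t m = pagetable_get_mfn(p2m_get_pagetable(p2m));
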
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
Acked-by: Tim Deegan <Tim.Deegan@citrix.com>
unsigned long mfn = 0;
p2m_type_t p2mt;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
if ( c->pending_valid &&
((c->pending_type == 1) || (c->pending_type > 6) ||
{
vmcb->np_enable = 1;
vmcb->g_pat = MSR_IA32_CR_PAT_RESET; /* guest PAT */
- vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
+ vmcb->h_cr3 = pagetable_get_paddr(p2m_get_pagetable(p2m));
}
if ( c->pending_valid )
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
-#include <asm/paging.h>
+#include <asm/p2m.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
{
vmcb->np_enable = 1; /* enable nested paging */
vmcb->g_pat = MSR_IA32_CR_PAT_RESET; /* guest PAT */
- vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
+ vmcb->h_cr3 = pagetable_get_paddr(p2m_get_pagetable(p2m_get_hostp2m(v->domain)));
/* No point in intercepting CR3 reads/writes. */
vmcb->cr_intercepts &= ~(CR_INTERCEPT_CR3_READ|CR_INTERCEPT_CR3_WRITE);
d->arch.hvm_domain.vmx.ept_control.etmt = EPT_DEFAULT_MT;
d->arch.hvm_domain.vmx.ept_control.gaw = EPT_DEFAULT_GAW;
d->arch.hvm_domain.vmx.ept_control.asr =
- pagetable_get_pfn(d->arch.phys_table);
+ pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
if ( (rc = vmx_alloc_vlapic_mapping(d)) != 0 )
/* Install the domain-specific P2M table */
l4e[l4_table_offset(RO_MPT_VIRT_START)] =
- l4e_from_pfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)),
+ l4e_from_pfn(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))),
__PAGE_HYPERVISOR);
hap_unmap_domain_page(l4e);
static void hap_install_xen_entries_in_l2h(struct vcpu *v, mfn_t l2hmfn)
{
struct domain *d = v->domain;
+ struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
l2_pgentry_t *l2e;
l3_pgentry_t *p2m;
int i;
l2e_empty();
/* Install the domain-specific p2m table */
- ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
- p2m = hap_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
+ ASSERT(pagetable_get_pfn(p2m_get_pagetable(hostp2m)) != 0);
+ p2m = hap_map_domain_page(pagetable_get_mfn(p2m_get_pagetable(hostp2m)));
for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
{
l2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
int direct_mmio = (p2mt == p2m_mmio_direct);
uint8_t ipat = 0;
int need_modify_vtd_table = 1;
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
if ( order != 0 )
if ( (gfn & ((1UL << order) - 1)) )
return 1;
- table = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+ table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
ASSERT(table != NULL);
p2m_query_t q)
{
ept_entry_t *table =
- map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+ map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
unsigned long gfn_remainder = gfn;
ept_entry_t *ept_entry;
u32 index;
static ept_entry_t ept_get_entry_content(struct domain *d, unsigned long gfn, int *level)
{
ept_entry_t *table =
- map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+ map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
unsigned long gfn_remainder = gfn;
ept_entry_t *ept_entry;
ept_entry_t content = { .epte = 0 };
void ept_walk_table(struct domain *d, unsigned long gfn)
{
ept_entry_t *table =
- map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+ map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
unsigned long gfn_remainder = gfn;
int i;
int i2;
int i1;
- if ( pagetable_get_pfn(d->arch.phys_table) == 0 )
+ if ( pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) == 0 )
return;
BUG_ON(EPT_DEFAULT_GAW != 3);
- l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+ l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
for (i4 = 0; i4 < EPT_PAGETABLE_ENTRIES; i4++ )
{
if ( !l4e[i4].epte )
unsigned long index;
unsigned long gfn, gfn_remainder;
unsigned long record_counter = 0;
+ struct p2m_domain *p2m;
for_each_domain(d)
{
if ( !(is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled) )
continue;
+ p2m = p2m_get_hostp2m(d);
printk("\ndomain%d EPT p2m table: \n", d->domain_id);
for ( gfn = 0; gfn <= d->arch.p2m->max_mapped_pfn; gfn += (1 << order) )
gfn_remainder = gfn;
mfn = _mfn(INVALID_MFN);
table =
- map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+ map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
for ( i = EPT_DEFAULT_GAW; i > 0; i-- )
{
unsigned int page_order, p2m_type_t p2mt)
{
// XXX -- this might be able to be faster iff current->domain == d
- mfn_t table_mfn = pagetable_get_mfn(d->arch.phys_table);
+ mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
void *table =map_domain_page(mfn_x(table_mfn));
unsigned long i, gfn_remainder = gfn;
l1_pgentry_t *p2m_entry;
* XXX we will return p2m_invalid for unmapped gfns */
*t = p2m_mmio_dm;
- mfn = pagetable_get_mfn(d->arch.phys_table);
+ mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
if ( gfn > d->arch.p2m->max_mapped_pfn )
/* This pfn is higher than the highest the p2m map currently holds */
struct page_info *page, *p2m_top;
unsigned int page_count = 0;
unsigned long gfn = -1UL;
- struct p2m_domain *p2m = d->arch.p2m;
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
p2m_lock(p2m);
- if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
+ if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
{
P2M_ERROR("p2m already allocated for this domain\n");
p2m_unlock(p2m);
return -ENOMEM;
}
- d->arch.phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));
+ p2m->phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));
P2M_PRINTK("populating p2m table\n");
* We know we don't have any extra mappings to these pages */
{
struct page_info *pg;
- struct p2m_domain *p2m = d->arch.p2m;
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
unsigned long gfn;
p2m_type_t t;
mfn_t mfn;
if(mfn_valid(mfn) && (t == p2m_ram_shared))
BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN));
}
- d->arch.phys_table = pagetable_null();
+ p2m->phys_table = pagetable_null();
while ( (pg = page_list_remove_head(&p2m->pages)) )
p2m->free_page(d, pg);
spin_unlock(&d->page_alloc_lock);
/* Audit part two: walk the domain's p2m table, checking the entries. */
- if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
+ if ( pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) != 0 )
{
l2_pgentry_t *l2e;
l1_pgentry_t *l1e;
l4_pgentry_t *l4e;
l3_pgentry_t *l3e;
int i3, i4;
- l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+ l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
#else /* CONFIG_PAGING_LEVELS == 3 */
l3_pgentry_t *l3e;
int i3;
- l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+ l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
#endif
gfn = 0;
l4_pgentry_t *l4e;
unsigned long i4;
#endif /* CONFIG_PAGING_LEVELS == 4 */
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
if ( !paging_mode_translate(d) )
return;
- if ( pagetable_get_pfn(d->arch.phys_table) == 0 )
+ if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) == 0 )
return;
ASSERT(p2m_locked_by_me(d->arch.p2m));
#if CONFIG_PAGING_LEVELS == 4
- l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+ l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
#else /* CONFIG_PAGING_LEVELS == 3 */
- l3mfn = _mfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
- l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+ l3mfn = _mfn(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+ l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
#endif
#if CONFIG_PAGING_LEVELS >= 4
out_locked:
shadow_unlock(d);
out_unlocked:
- if ( rv != 0 && !pagetable_is_null(d->arch.phys_table) )
+ if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m_get_hostp2m(d))) )
p2m_teardown(d);
if ( rv != 0 && pg != NULL )
shadow_free_p2m_page(d, pg);
{
/* install domain-specific P2M table */
sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
- shadow_l4e_from_mfn(pagetable_get_mfn(d->arch.phys_table),
+ shadow_l4e_from_mfn(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))),
__PAGE_HYPERVISOR);
}
{
/* Install the domain-specific p2m table */
l3_pgentry_t *p2m;
- ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
- p2m = sh_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
+ ASSERT(pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) != 0);
+ p2m = sh_map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))));
for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
{
sl2e[shadow_l2_table_offset(RO_MPT_VIRT_START) + i] =
#endif
}
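+/* No separate host p2m structure on this architecture: the
+ * domain itself stands in for the host p2m. */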
+#define p2m_get_hostp2m(d) (d)
+
#endif // __ASM_P2M_ENTRY_H__
/*
struct paging_domain paging;
struct p2m_domain *p2m;
- /* Shadow translated domain: P2M mapping */
- pagetable_t phys_table;
-
/* NB. protected by d->event_lock and by irq_desc[irq].lock */
int *irq_pirq;
int *pirq_irq;
#include <xen/config.h>
#include <xen/paging.h>
#include <asm/mem_sharing.h>
+#include <asm/page.h> /* for pagetable_t */
/*
* The phys_to_machine_mapping maps guest physical frame numbers
int locker; /* processor which holds the lock */
const char *locker_function; /* Func that took it */
+ /* Shadow translated domain: p2m mapping */
+ pagetable_t phys_table;
+
/* Pages used to construct the p2m */
struct page_list_head pages;
} pod;
};
+/* get host p2m table */
+#define p2m_get_hostp2m(d) ((d)->arch.p2m)
+
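+/* get the root of the p2m pagetable */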
+#define p2m_get_pagetable(p2m) ((p2m)->phys_table)
+
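+/* Typical use, e.g. to map the p2m root (the pattern used
+ * throughout this patch):
+ *   map_domain_page(mfn_x(pagetable_get_mfn(
+ *       p2m_get_pagetable(p2m_get_hostp2m(d)))));
+ */
+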
/*
* The P2M lock. This protects all updates to the p2m table.
* Updates are expected to be safe against concurrent reads,