#include <asm/hvm/nestedhvm.h>
#include <asm/hvm/event.h>
#include <asm/hvm/vmx/vmx.h>
+#include <asm/altp2m.h>
#include <asm/mtrr.h>
#include <asm/apic.h>
#include <public/sched.h>
{
hvm_all_ioreq_servers_remove_vcpu(v->domain, v);
+ altp2m_vcpu_destroy(v);
nestedhvm_vcpu_destroy(v);
free_compat_arg_xlat(v);
tasklet_kill(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
}
+void altp2m_vcpu_update_p2m(struct vcpu *v)
+{
+ if ( hvm_funcs.altp2m_vcpu_update_p2m )
+ hvm_funcs.altp2m_vcpu_update_p2m(v);
+}
+
+void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
+{
+ if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
+ hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v);
+}
+
+bool_t altp2m_vcpu_emulate_ve(struct vcpu *v)
+{
+ if ( hvm_funcs.altp2m_vcpu_emulate_ve )
+ return hvm_funcs.altp2m_vcpu_emulate_ve(v);
+ return 0;
+}
+
/*
* Local variables:
* mode: C
obj-y += paging.o
obj-y += p2m.o p2m-pt.o p2m-ept.o p2m-pod.o
+obj-y += altp2m.o
obj-y += guest_walk_2.o
obj-y += guest_walk_3.o
obj-$(x86_64) += guest_walk_4.o
--- /dev/null
+/*
+ * Alternate p2m HVM
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <asm/hvm/support.h>
+#include <asm/hvm/hvm.h>
+#include <asm/p2m.h>
+#include <asm/altp2m.h>
+
+void
+altp2m_vcpu_reset(struct vcpu *v)
+{
+ struct altp2mvcpu *av = &vcpu_altp2m(v);
+
+ av->p2midx = INVALID_ALTP2M;
+ av->veinfo_gfn = _gfn(INVALID_GFN);
+}
+
+void
+altp2m_vcpu_initialise(struct vcpu *v)
+{
+ if ( v != current )
+ vcpu_pause(v);
+
+ altp2m_vcpu_reset(v);
+ vcpu_altp2m(v).p2midx = 0;
+ atomic_inc(&p2m_get_altp2m(v)->active_vcpus);
+
+ altp2m_vcpu_update_p2m(v);
+
+ if ( v != current )
+ vcpu_unpause(v);
+}
+
+void
+altp2m_vcpu_destroy(struct vcpu *v)
+{
+ struct p2m_domain *p2m;
+
+ if ( v != current )
+ vcpu_pause(v);
+
+ if ( (p2m = p2m_get_altp2m(v)) )
+ atomic_dec(&p2m->active_vcpus);
+
+ altp2m_vcpu_reset(v);
+
+ altp2m_vcpu_update_p2m(v);
+ altp2m_vcpu_update_vmfunc_ve(v);
+
+ if ( v != current )
+ vcpu_unpause(v);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
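For orientation, altp2m_vcpu_initialise()/altp2m_vcpu_destroy() are per-vCPU helpers; a domain-wide enable/disable path (added separately, not in this hunk) is expected to walk every vCPU of the domain. A minimal sketch of such a caller, using a hypothetical function name, might look like this:

    /*
     * Hypothetical caller, not part of this patch: flip altp2m on or off
     * for a whole domain.  The per-vCPU helpers above pause/unpause each
     * remote vCPU themselves.
     */
    static int altp2m_set_domain_state(struct domain *d, bool_t state)
    {
        struct vcpu *v;

        if ( d->arch.altp2m_active == state )
            return 0;

        d->arch.altp2m_active = state;

        for_each_vcpu ( d, v )
        {
            if ( state )
                altp2m_vcpu_initialise(v);   /* selects altp2m index 0 */
            else
                altp2m_vcpu_destroy(v);      /* resets to INVALID_ALTP2M */
        }

        return 0;
    }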
int hap_enable(struct domain *d, u32 mode)
{
unsigned int old_pages;
- uint8_t i;
+ unsigned int i;
int rv = 0;
domain_pause(d);
goto out;
}
+ if ( hvm_altp2m_supported() )
+ {
+ /* Init alternate p2m data */
+ if ( (d->arch.altp2m_eptp = alloc_xenheap_page()) == NULL )
+ {
+ rv = -ENOMEM;
+ goto out;
+ }
+
+ for ( i = 0; i < MAX_EPTP; i++ )
+ d->arch.altp2m_eptp[i] = INVALID_MFN;
+
+ for ( i = 0; i < MAX_ALTP2M; i++ )
+ {
+ rv = p2m_alloc_table(d->arch.altp2m_p2m[i]);
+ if ( rv != 0 )
+ goto out;
+ }
+
+ d->arch.altp2m_active = 0;
+ }
+
/* Now let other users see the new mode */
d->arch.paging.mode = mode | PG_HAP_enable;
void hap_final_teardown(struct domain *d)
{
- uint8_t i;
+ unsigned int i;
+
+ if ( hvm_altp2m_supported() )
+ {
+ d->arch.altp2m_active = 0;
+
+ if ( d->arch.altp2m_eptp )
+ {
+ free_xenheap_page(d->arch.altp2m_eptp);
+ d->arch.altp2m_eptp = NULL;
+ }
+
+ for ( i = 0; i < MAX_ALTP2M; i++ )
+ p2m_teardown(d->arch.altp2m_p2m[i]);
+ }
/* Destroy nestedp2m's first */
for (i = 0; i < MAX_NESTEDP2M; i++) {
#define nestedp2m_lock(d) mm_lock(nestedp2m, &(d)->arch.nested_p2m_lock)
#define nestedp2m_unlock(d) mm_unlock(&(d)->arch.nested_p2m_lock)
-/* P2M lock (per-p2m-table)
+/* P2M lock (per-non-alt-p2m-table)
*
* This protects all queries and updates to the p2m table.
* Queries may be made under the read lock but all modifications
*
* The write lock is recursive as it is common for a code path to look
* up a gfn and later mutate it.
+ *
+ * Note that this lock shares its implementation with the altp2m
+ * lock (not the altp2m list lock), so the implementation
+ * is found there.
+ *
+ * Changes made to the host p2m when in altp2m mode are propagated to the
+ * altp2ms synchronously in ept_set_entry(). At that point, we will hold
+ * the host p2m lock; propagating this change involves grabbing the
+ * altp2m_list lock, and the locks of the individual alternate p2ms. In
+ * order to allow us to maintain locking order discipline, we split the p2m
+ * lock into p2m (for host p2ms) and altp2m (for alternate p2ms), putting
+ * the altp2mlist lock in the middle.
*/
declare_mm_rwlock(p2m);
-#define p2m_lock(p) mm_write_lock(p2m, &(p)->lock);
+
+/* Alternate P2M list lock (per-domain)
+ *
+ * A per-domain lock that protects the list of alternate p2ms.
+ * Any operation that walks the list needs to acquire this lock.
+ * Additionally, before destroying an alternate p2m, all vCPUs
+ * in the target domain must be paused.
+ */
+
+declare_mm_lock(altp2mlist)
+#define altp2m_list_lock(d) mm_lock(altp2mlist, &(d)->arch.altp2m_list_lock)
+#define altp2m_list_unlock(d) mm_unlock(&(d)->arch.altp2m_list_lock)
+
+/* P2M lock (per-altp2m-table)
+ *
+ * This protects all queries and updates to the p2m table.
+ * Queries may be made under the read lock but all modifications
+ * need the main (write) lock.
+ *
+ * The write lock is recursive as it is common for a code path to look
+ * up a gfn and later mutate it.
+ */
+
+declare_mm_rwlock(altp2m);
+#define p2m_lock(p) \
+{ \
+ if ( p2m_is_altp2m(p) ) \
+ mm_write_lock(altp2m, &(p)->lock); \
+ else \
+ mm_write_lock(p2m, &(p)->lock); \
+}
#define p2m_unlock(p) mm_write_unlock(&(p)->lock);
#define gfn_lock(p,g,o) p2m_lock(p)
#define gfn_unlock(p,g,o) p2m_unlock(p)
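A short sketch of the ordering discipline described above: with the host p2m (write) lock already held, a propagation routine takes the altp2m list lock and then each alternate p2m's own lock in turn. The helper name and parameters below are placeholders, not part of this patch.

    /*
     * Illustration only (hypothetical helper): the caller already holds
     * the host p2m write lock; lock order is p2m -> altp2mlist -> altp2m.
     */
    static void sync_change_to_altp2ms(struct domain *d, unsigned long gfn)
    {
        unsigned int i;

        altp2m_list_lock(d);

        for ( i = 0; i < MAX_ALTP2M; i++ )
        {
            struct p2m_domain *ap2m = d->arch.altp2m_p2m[i];

            if ( d->arch.altp2m_eptp[i] == INVALID_MFN )
                continue;

            p2m_lock(ap2m);      /* resolves to the altp2m mm_rwlock */
            /* ... mirror the updated entry for 'gfn' into ap2m ... */
            p2m_unlock(ap2m);
        }

        altp2m_list_unlock(d);
    }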
#include <asm/hvm/vmx/vmx.h> /* ept_p2m_init() */
#include <asm/mem_sharing.h>
#include <asm/hvm/nestedhvm.h>
+#include <asm/altp2m.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
#include <xsm/xsm.h>
}
}
+static void p2m_teardown_altp2m(struct domain *d)
+{
+ unsigned int i;
+ struct p2m_domain *p2m;
+
+ for ( i = 0; i < MAX_ALTP2M; i++ )
+ {
+ if ( !d->arch.altp2m_p2m[i] )
+ continue;
+ p2m = d->arch.altp2m_p2m[i];
+ d->arch.altp2m_p2m[i] = NULL;
+ p2m_free_one(p2m);
+ }
+}
+
+static int p2m_init_altp2m(struct domain *d)
+{
+ unsigned int i;
+ struct p2m_domain *p2m;
+
+ mm_lock_init(&d->arch.altp2m_list_lock);
+ for ( i = 0; i < MAX_ALTP2M; i++ )
+ {
+ d->arch.altp2m_p2m[i] = p2m = p2m_init_one(d);
+ if ( p2m == NULL )
+ {
+ p2m_teardown_altp2m(d);
+ return -ENOMEM;
+ }
+ p2m->p2m_class = p2m_alternate;
+ p2m->access_required = 1;
+ _atomic_set(&p2m->active_vcpus, 0);
+ }
+
+ return 0;
+}
+
int p2m_init(struct domain *d)
{
int rc;
* (p2m_init runs too early for HVM_PARAM_* options) */
rc = p2m_init_nestedp2m(d);
if ( rc )
+ {
+ p2m_teardown_hostp2m(d);
+ return rc;
+ }
+
+ rc = p2m_init_altp2m(d);
+ if ( rc )
+ {
p2m_teardown_hostp2m(d);
+ p2m_teardown_nestedp2m(d);
+ }
return rc;
}
return err;
}
+unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
+{
+ struct p2m_domain *p2m;
+ struct ept_data *ept;
+ unsigned int i;
+
+ altp2m_list_lock(d);
+
+ for ( i = 0; i < MAX_ALTP2M; i++ )
+ {
+ if ( d->arch.altp2m_eptp[i] == INVALID_MFN )
+ continue;
+
+ p2m = d->arch.altp2m_p2m[i];
+ ept = &p2m->ept;
+
+ if ( eptp == ept_get_eptp(ept) )
+ goto out;
+ }
+
+ i = INVALID_ALTP2M;
+
+ out:
+ altp2m_list_unlock(d);
+ return i;
+}
+
+bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
+{
+ struct domain *d = v->domain;
+ bool_t rc = 0;
+
+ if ( idx >= MAX_ALTP2M )
+ return rc;
+
+ altp2m_list_lock(d);
+
+ if ( d->arch.altp2m_eptp[idx] != INVALID_MFN )
+ {
+ if ( idx != vcpu_altp2m(v).p2midx )
+ {
+ atomic_dec(&p2m_get_altp2m(v)->active_vcpus);
+ vcpu_altp2m(v).p2midx = idx;
+ atomic_inc(&p2m_get_altp2m(v)->active_vcpus);
+ altp2m_vcpu_update_p2m(v);
+ }
+ rc = 1;
+ }
+
+ altp2m_list_unlock(d);
+ return rc;
+}
+
/*** Audit ***/
#if P2M_AUDIT
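To show how the two lookups above are meant to compose, here is a hedged sketch of an exit-path consumer: when hardware reports that the guest switched EPTP (e.g. via VMFUNC), the handler maps the new EPTP back to an altp2m index and updates the vCPU's view. The function name is hypothetical; the actual VMX wiring is not part of this hunk.

    /* Hypothetical consumer of the helpers above. */
    static void handle_guest_eptp_switch(struct vcpu *v, uint64_t new_eptp)
    {
        unsigned int idx = p2m_find_altp2m_by_eptp(v->domain, new_eptp);

        if ( idx == INVALID_ALTP2M ||
             !p2m_switch_vcpu_altp2m_by_id(v, idx) )
            domain_crash(v->domain);    /* EPTP matches no altp2m */
    }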
--- /dev/null
+/*
+ * Alternate p2m HVM
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef _X86_ALTP2M_H
+#define _X86_ALTP2M_H
+
+#include <xen/types.h>
+#include <xen/sched.h> /* for struct vcpu, struct domain */
+#include <asm/hvm/vcpu.h> /* for vcpu_altp2m */
+
+/* Alternate p2m HVM on/off per domain */
+static inline bool_t altp2m_active(const struct domain *d)
+{
+ return d->arch.altp2m_active;
+}
+
+/* Alternate p2m VCPU */
+void altp2m_vcpu_initialise(struct vcpu *v);
+void altp2m_vcpu_destroy(struct vcpu *v);
+void altp2m_vcpu_reset(struct vcpu *v);
+
+#endif /* _X86_ALTP2M_H */
+
typedef xen_domctl_cpuid_t cpuid_input_t;
#define MAX_NESTEDP2M 10
+
+#define MAX_ALTP2M 10 /* arbitrary */
+#define INVALID_ALTP2M 0xffff
+#define MAX_EPTP (PAGE_SIZE / sizeof(uint64_t))
struct p2m_domain;
struct time_scale {
int shift;
struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
mm_lock_t nested_p2m_lock;
+ /* altp2m: allow multiple copies of host p2m */
+ bool_t altp2m_active;
+ struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
+ mm_lock_t altp2m_list_lock;
+ uint64_t *altp2m_eptp;
+
/* NB. protected by d->event_lock and by irq_desc[irq].lock */
struct radix_tree_root irq_pirq;
void (*enable_msr_exit_interception)(struct domain *d);
bool_t (*is_singlestep_supported)(void);
+
+ /* Alternate p2m */
+ void (*altp2m_vcpu_update_p2m)(struct vcpu *v);
+ void (*altp2m_vcpu_update_vmfunc_ve)(struct vcpu *v);
+ bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
};
extern struct hvm_function_table hvm_funcs;
#define opt_hvm_fep 0
#endif
+/* updates the current hardware p2m */
+void altp2m_vcpu_update_p2m(struct vcpu *v);
+
+/* updates VMCS fields related to VMFUNC and #VE */
+void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v);
+
+/* emulates #VE */
+bool_t altp2m_vcpu_emulate_ve(struct vcpu *v);
+
#endif /* __ASM_X86_HVM_HVM_H__ */
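The three hvm_funcs hooks declared above are optional: the wrappers in hvm.c test for NULL before calling. A backend that supports altp2m fills them in its function table (the VMX side arrives in a later patch); a hedged sketch with made-up backend names:

    /* Hypothetical backend wiring; the identifiers are placeholders. */
    static void example_vcpu_update_p2m(struct vcpu *v)
    {
        /* Reload the hardware paging-structure pointer for 'v' here. */
    }

    static struct hvm_function_table example_hvm_funcs = {
        .name                   = "example",
        .altp2m_vcpu_update_p2m = example_vcpu_update_p2m,
        /* .altp2m_vcpu_update_vmfunc_ve and .altp2m_vcpu_emulate_ve may
         * stay NULL; the hvm.c wrappers then degrade to no-ops. */
    };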
/*
#define vcpu_nestedhvm(v) ((v)->arch.hvm_vcpu.nvcpu)
+struct altp2mvcpu {
+ uint16_t p2midx; /* alternate p2m index */
+ gfn_t veinfo_gfn; /* #VE information page gfn */
+};
+
+#define vcpu_altp2m(v) ((v)->arch.hvm_vcpu.avcpu)
+
struct hvm_vcpu {
/* Guest control-register and EFER values, just as the guest sees them. */
unsigned long guest_cr[5];
struct nestedvcpu nvcpu;
+ struct altp2mvcpu avcpu;
+
struct mtrr_state mtrr;
u64 pat_cr;
typedef enum {
p2m_host,
p2m_nested,
+ p2m_alternate,
} p2m_class_t;
/* Per-p2m-table state */
struct domain *domain; /* back pointer to domain */
- p2m_class_t p2m_class; /* host/nested/? */
+ p2m_class_t p2m_class; /* host/nested/alternate */
/* Nested p2ms only: nested p2m base value that this p2m shadows.
* This can be cleared to P2M_BASE_EADDR under the per-p2m lock but
* host p2m's lock. */
int defer_nested_flush;
+ /* Alternate p2m: count of vCPUs currently using this p2m. */
+ atomic_t active_vcpus;
+
/* Pages used to construct the p2m */
struct page_list_head pages;
return p2m->p2m_class == p2m_nested;
}
+static inline bool_t p2m_is_altp2m(const struct p2m_domain *p2m)
+{
+ return p2m->p2m_class == p2m_alternate;
+}
+
#define p2m_get_pagetable(p2m) ((p2m)->phys_table)
/**** p2m query accessors. They lock p2m_lock, and thus serialize
void nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
l1_pgentry_t *p, l1_pgentry_t new, unsigned int level);
+/*
+ * Alternate p2m: shadow p2m tables used for alternate memory views
+ */
+
+/* get current alternate p2m table */
+static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
+{
+ unsigned int index = vcpu_altp2m(v).p2midx;
+
+ if ( index == INVALID_ALTP2M )
+ return NULL;
+
+ BUG_ON(index >= MAX_ALTP2M);
+
+ return v->domain->arch.altp2m_p2m[index];
+}
+
+/* Locate an alternate p2m by its EPTP */
+unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp);
+
+/* Switch alternate p2m for a single vcpu */
+bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx);
+
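p2m_get_altp2m() returns NULL when the vCPU has no altp2m selected, so per-vCPU translation paths are expected to fall back to the host p2m. A small, hypothetical convenience helper illustrating the intended selection (not part of this patch):

    /* Hypothetical helper: pick the p2m a translation for 'v' should use. */
    static inline struct p2m_domain *vcpu_effective_p2m(struct vcpu *v)
    {
        struct p2m_domain *p2m = altp2m_active(v->domain)
                                 ? p2m_get_altp2m(v) : NULL;

        return p2m ?: p2m_get_hostp2m(v->domain);
    }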
/*
* p2m type to IOMMU flags
*/