DEFINE_PERCPU_RWLOCK_GLOBAL(p2m_percpu_rwlock);
+/*
+ * Initialise the nested-p2m related fields of @p2m: empty the np2m LRU
+ * list link, mark the shadowed base as invalid (P2M_BASE_EADDR) and reset
+ * the generation counter.  Compiles to a no-op when !CONFIG_HVM, so the
+ * caller (p2m_initialise) can invoke it unconditionally.
+ */
+static void p2m_nestedp2m_init(struct p2m_domain *p2m)
+{
+#ifdef CONFIG_HVM
+ INIT_LIST_HEAD(&p2m->np2m_list);
+
+ p2m->np2m_base = P2M_BASE_EADDR;
+ p2m->np2m_generation = 0;
+#endif
+}
+
/* Init the datastructures for later use by the p2m code */
static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
{
int ret = 0;
mm_rwlock_init(&p2m->lock);
- INIT_LIST_HEAD(&p2m->np2m_list);
INIT_PAGE_LIST_HEAD(&p2m->pages);
p2m->domain = d;
p2m->default_access = p2m_access_rwx;
p2m->p2m_class = p2m_host;
- p2m->np2m_base = P2M_BASE_EADDR;
- p2m->np2m_generation = 0;
-
p2m_pod_init(p2m);
+ p2m_nestedp2m_init(p2m);
if ( hap_enabled(d) && cpu_has_vmx )
ret = ept_p2m_init(p2m);
}
}
+#ifdef CONFIG_HVM
static void p2m_teardown_nestedp2m(struct domain *d)
{
unsigned int i;
return 0;
}
+#endif
static void p2m_teardown_altp2m(struct domain *d)
{
if ( rc )
return rc;
+#ifdef CONFIG_HVM
/* Must initialise nestedp2m unconditionally
* since nestedhvm_enabled(d) returns false here.
* (p2m_init runs too early for HVM_PARAM_* options) */
p2m_teardown_hostp2m(d);
return rc;
}
+#endif
rc = p2m_init_altp2m(d);
if ( rc )
{
p2m_teardown_hostp2m(d);
+#ifdef CONFIG_HVM
p2m_teardown_nestedp2m(d);
+#endif
}
return rc;
* we initialise them unconditionally.
*/
p2m_teardown_altp2m(d);
+#ifdef CONFIG_HVM
p2m_teardown_nestedp2m(d);
+#endif
/* Iterate over all p2m tables per domain */
p2m_teardown_hostp2m(d);
p2m_switch_vcpu_altp2m_by_id(v, idx);
}
+#ifdef CONFIG_HVM
static struct p2m_domain *
p2m_getlru_nestedp2m(struct domain *d, struct p2m_domain *p2m)
{
p2m_unlock(p2m);
}
}
+#endif
unsigned long paging_gva_to_gfn(struct vcpu *v,
unsigned long va,
p2m_class_t p2m_class; /* host/nested/alternate */
+#ifdef CONFIG_HVM
/* Nested p2ms only: nested p2m base value that this p2m shadows.
* This can be cleared to P2M_BASE_EADDR under the per-p2m lock but
* needs both the per-p2m lock and the per-domain nestedp2m lock
 * The host p2m holds the head of the list and the np2ms are
* threaded on in LRU order. */
struct list_head np2m_list;
+#endif
/* Host p2m: Log-dirty ranges registered for the domain. */
struct rangeset *logdirty_ranges;
#define NP2M_SCHEDLE_IN 0
#define NP2M_SCHEDLE_OUT 1
+#ifdef CONFIG_HVM
void np2m_schedule(int dir);
+#else
+/* Empty inline stub so callers build unchanged when !CONFIG_HVM. */
+static inline void np2m_schedule(int dir) {}
+#endif
static inline bool_t p2m_is_hostp2m(const struct p2m_domain *p2m)
{