* domain matches target */
int p2m_pod_set_mem_target(struct domain *d, unsigned long target);
+/* Obtain a consistent snapshot of PoD-related domain state. */
+void p2m_pod_get_mem_target(const struct domain *d, xen_pod_target_t *target);
+
+/* Check whether PoD is (still) active in a domain. */
+bool p2m_pod_active(const struct domain *d);
+
/* Scan pod cache when offline/broken page triggered */
int
p2m_pod_offline_or_broken_hit(struct page_info *p);
void
p2m_pod_offline_or_broken_replace(struct page_info *p);
-static inline long p2m_pod_entry_count(const struct p2m_domain *p2m)
-{
- return p2m->pod.entry_count;
-}
-
#else
static inline bool
return 0;
}
+static inline bool p2m_pod_active(const struct domain *d)
+{
+    return false;
+}
+
static inline int p2m_pod_offline_or_broken_hit(struct page_info *p)
{
    return 0;
}

static inline void p2m_pod_offline_or_broken_replace(struct page_info *p)
{
    ASSERT_UNREACHABLE();
}
-static inline long p2m_pod_entry_count(const struct p2m_domain *p2m)
-{
- return 0;
-}
-
#endif
{
xen_pod_target_t target;
struct domain *d;
- struct p2m_domain *p2m;
if ( copy_from_guest(&target, arg, 1) )
return -EFAULT;
}
else if ( rc >= 0 )
{
- p2m = p2m_get_hostp2m(d);
- target.tot_pages = domain_tot_pages(d);
- target.pod_cache_pages = p2m->pod.count;
- target.pod_entries = p2m->pod.entry_count;
+ p2m_pod_get_mem_target(d, &target);
if ( __copy_to_guest(arg, &target, 1) )
rc = -EFAULT;
*/
#include <xen/event.h>
+#include <xen/iocap.h>
#include <xen/ioreq.h>
#include <xen/mm.h>
#include <xen/sched.h>
ASSERT( pod_target >= p2m->pod.count );
- ret = p2m_pod_set_cache_target(p2m, pod_target, 1/*preemptible*/);
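+    /*
+     * Refuse to grow the PoD cache once physical devices are (or may be)
+     * assigned: PoD and device passthrough are mutually exclusive (see the
+     * corresponding check on the device assignment path).
+     */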
+    if ( has_arch_pdevs(d) || cache_flush_permitted(d) )
+        ret = -ENOTEMPTY;
+    else
+        ret = p2m_pod_set_cache_target(p2m, pod_target, 1/*preemptible*/);
out:
pod_unlock(p2m);
return ret;
}
+void p2m_pod_get_mem_target(const struct domain *d, xen_pod_target_t *target)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+    ASSERT(is_hvm_domain(d));
+
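+    /*
+     * Take both the PoD lock and the page allocation lock, so that tot_pages
+     * and the PoD counters are sampled consistently.
+     */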
+    pod_lock(p2m);
+    lock_page_alloc(p2m);
+
+    target->tot_pages       = domain_tot_pages(d);
+    target->pod_cache_pages = p2m->pod.count;
+    target->pod_entries     = p2m->pod.entry_count;
+
+    unlock_page_alloc(p2m);
+    pod_unlock(p2m);
+}
+
int p2m_pod_empty_cache(struct domain *d)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
if ( !paging_mode_translate(d) )
return -EINVAL;
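+    /*
+     * Don't create new PoD entries for a domain which has physical devices
+     * assigned (or which is set up to get them).
+     */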
+    if ( has_arch_pdevs(d) || cache_flush_permitted(d) )
+        return -ENOTEMPTY;
+
do {
rc = mark_populate_on_demand(d, gfn, chunk_order);
for ( i = 0; i < ARRAY_SIZE(p2m->pod.mrp.list); ++i )
p2m->pod.mrp.list[i] = gfn_x(INVALID_GFN);
}
+
+bool p2m_pod_active(const struct domain *d)
+{
+    struct p2m_domain *p2m;
+    bool res;
+
+    if ( !is_hvm_domain(d) )
+        return false;
+
+    p2m = p2m_get_hostp2m(d);
+
+    pod_lock(p2m);
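+    /* PoD is active while there are outstanding entries or cached pages. */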
+    res = p2m->pod.entry_count | p2m->pod.count;
+    pod_unlock(p2m);
+
+    return res;
+}
rc = -EXDEV;
/* Disallow paging in a PoD guest */
- if ( p2m_pod_entry_count(p2m_get_hostp2m(d)) )
+ if ( p2m_pod_active(d) )
break;
/* domain_pause() not required here, see XSA-99 */
{
/*
* Prevent device assign if mem paging, mem sharing or log-dirty
- * have been enabled for this domain.
+ * have been enabled for this domain, or if PoD is still in active use.
*/
return d == dom_io ||
(likely(!mem_sharing_enabled(d)) &&
likely(!mem_paging_enabled(d)) &&
+ likely(!p2m_pod_active(d)) &&
likely(!p2m_is_global_logdirty(d)));
}