    return ret;
}
+/* Clean up on domain destruction */
+void mem_event_cleanup(struct domain *d)
+{
+    if ( d->mem_event->paging.ring_page )
+        (void)mem_event_disable(d, &d->mem_event->paging);
+    if ( d->mem_event->access.ring_page )
+        (void)mem_event_disable(d, &d->mem_event->access);
+    if ( d->mem_event->share.ring_page )
+        (void)mem_event_disable(d, &d->mem_event->share);
+}
+
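For reference, a minimal sketch of what each mem_event_disable() call above is assumed to do: unmap the Xen-side mapping of the ring page and clear the pointer that the checks above test. This is a hedged sketch, not the in-tree body; unmap_domain_page() is the standard Xen call that undoes a map_domain_page() mapping.

/* Sketch only: assumed shape of the per-ring teardown. */
static int mem_event_disable(struct domain *d, struct mem_event_domain *med)
{
    /* Drop Xen's mapping of the shared ring and mark it torn down. */
    unmap_domain_page(med->ring_page);
    med->ring_page = NULL;
    return 0;
}
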
int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
                     XEN_GUEST_HANDLE(void) u_domctl)
{
            break;
        }
        d->is_dying = DOMDYING_dead;
+        /* Mem event cleanup has to go here because the ring pages
+         * have to be put before we call put_domain(). */
+        mem_event_cleanup(d);
        put_domain(d);
        send_global_virq(VIRQ_DOM_EXC);
        /* fallthrough */
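The ordering comment above is the crux: while a ring is live it holds references that keep the domain's reference count from reaching zero, so the cleanup must run before this path drops its own reference. An illustrative view of the two calls (comments only, not in-tree code):

/* Illustrative only. */
mem_event_cleanup(d);   /* ring references are put while d is still alive */
put_domain(d);          /* the refcount can now genuinely reach zero      */
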
    machine_to_phys_mapping[(mfn)] = (pfn); \
})
-#define put_gfn(d, g) ((void)0)
+static inline void put_gfn(struct domain *d, unsigned long gfn) {}
+static inline void mem_event_cleanup(struct domain *d) {}
#define INVALID_MFN (~0UL)
    gmfn_to_mfn_foreign((_d), (gpfn))
#define get_gfn_untyped(d, gpfn) gmfn_to_mfn(d, gpfn)
-#define put_gfn(d, g) ((void)0)
+static inline void put_gfn(struct domain *d, unsigned long gfn) {}
+static inline void mem_event_cleanup(struct domain *d) {}
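Turning the ((void)0) macros into empty static inlines is more than cosmetic: the inline still type-checks and evaluates its arguments, whereas the macro silently discards whatever it is passed. The stubs also let common code such as domain_kill() above call mem_event_cleanup() on architectures with no mem_event support. A small illustration with a hypothetical caller:

/* Hypothetical caller, for illustration only. */
static void example_teardown(struct domain *d)
{
    put_gfn(d, 0);          /* arguments are now type-checked...          */
    /* put_gfn(1, d); */    /* ...so this swapped call would not build    */
    mem_event_cleanup(d);   /* compiles to nothing on this architecture   */
}
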
#define __gpfn_invalid(_d, gpfn) \
    (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL) == INVALID_MFN)
unsigned long domain_get_maximum_gpfn(struct domain *d);
+#ifdef CONFIG_X86_64
+void mem_event_cleanup(struct domain *d);
+#else
+static inline void mem_event_cleanup(struct domain *d) {}
+#endif
+
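The CONFIG_X86_64 split mirrors the ia64 stubs: mem_event is only implemented on x86-64, so other builds see an empty inline and the cleanup call in domain_kill() compiles away. A sketch of the resulting call-site pattern (hypothetical function name):

/* Hypothetical example: callers need no #ifdef of their own. */
static void example_relinquish(struct domain *d)
{
    mem_event_cleanup(d);   /* real teardown on x86-64, no-op elsewhere */
}
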
extern struct domain *dom_xen, *dom_io, *dom_cow; /* for vmcoreinfo */
/* Definition of an mm lock: spinlock with extra fields for debugging */