... for being unsupported.
While doing so, make the option dependent upon HVM, which really is the
main purpose of the change.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Alexandru Isaila <aisaila@bitdefender.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
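---
For context: the net effect follows the usual Xen pattern for optional
features. The new Kconfig option (visible only with UNSUPPORTED, and now
depending on HVM) gates both the mem_paging.o object and the predicate
added to asm-x86/mem_paging.h, so most callers can simply test
mem_paging_enabled() without an #ifdef of their own. A minimal sketch of
that pattern, reusing the names introduced below plus a hypothetical
caller:

    #ifdef CONFIG_MEM_PAGING
    # define mem_paging_enabled(d) vm_event_check_ring((d)->vm_event_paging)
    #else
    # define mem_paging_enabled(d) false   /* check folds away at build time */
    #endif

    /* Hypothetical caller: the branch becomes dead code when MEM_PAGING=n. */
    if ( mem_paging_enabled(d) )
        rc = handle_paging_request(d);

Enabling the feature in a build would then presumably require
CONFIG_UNSUPPORTED=y (to make the prompt visible) together with
CONFIG_HVM=y, on top of selecting CONFIG_MEM_PAGING itself.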
select HAS_FAST_MULTIPLY
select HAS_IOPORTS
select HAS_KEXEC
- select HAS_MEM_PAGING
select HAS_NS16550
select HAS_PASSTHROUGH
select HAS_PCI
endif
+config MEM_PAGING
+ bool "Xen memory paging support (UNSUPPORTED)" if UNSUPPORTED
+ depends on HVM
+
config MEM_SHARING
bool "Xen memory sharing support (UNSUPPORTED)" if UNSUPPORTED
depends on HVM
goto out_put_gfn;
}
+#ifdef CONFIG_MEM_PAGING
/* Check if the page has been paged out */
if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
paged = 1;
+#endif
#ifdef CONFIG_MEM_SHARING
/* Mem sharing: if still shared on write access then its enomem */
obj-$(CONFIG_HVM) += guest_walk_2.o guest_walk_3.o guest_walk_4.o
obj-$(CONFIG_SHADOW_PAGING) += guest_walk_4.o
obj-$(CONFIG_MEM_ACCESS) += mem_access.o
-obj-y += mem_paging.o
+obj-$(CONFIG_MEM_PAGING) += mem_paging.o
obj-$(CONFIG_MEM_SHARING) += mem_sharing.o
obj-y += p2m.o p2m-pt.o
obj-$(CONFIG_HVM) += p2m-ept.o p2m-pod.o
case XENMEM_get_sharing_shared_pages:
return mem_sharing_get_nr_shared_mfns();
+#ifdef CONFIG_MEM_PAGING
case XENMEM_paging_op:
return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));
+#endif
#ifdef CONFIG_MEM_SHARING
case XENMEM_sharing_op:
case XENMEM_get_sharing_shared_pages:
return mem_sharing_get_nr_shared_mfns();
+#ifdef CONFIG_MEM_PAGING
case XENMEM_paging_op:
return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));
+#endif
#ifdef CONFIG_MEM_SHARING
case XENMEM_sharing_op:
config HAS_KEXEC
bool
-config HAS_MEM_PAGING
- bool
-
config HAS_PDX
bool
free_xenoprof_pages(d);
#endif
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
xfree(d->vm_event_paging);
#endif
xfree(d->vm_event_monitor);
page = get_page_from_gfn(d, gfn_x(gfn), &p2mt, q);
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
if ( p2m_is_paging(p2mt) )
{
if ( page )
/* Check flags which apply only when the vCPU is paused */
if ( atomic_read(&v->vm_event_pause_count) )
{
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
if ( rsp.reason == VM_EVENT_REASON_MEM_PAGING )
p2m_mem_paging_resume(d, &rsp);
#endif
return vm_event_grab_slot(ved, current->domain != d);
}
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
/* Registered with Xen-bound event channel for incoming notifications. */
static void mem_paging_notification(struct vcpu *v, unsigned int port)
{
/* Clean up on domain destruction */
void vm_event_cleanup(struct domain *d)
{
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
if ( vm_event_check_ring(d->vm_event_paging) )
{
/* Destroying the wait queue head means waking up all
switch ( vec->mode )
{
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
case XEN_DOMCTL_VM_EVENT_OP_PAGING:
{
rc = -EINVAL;
#include <asm/hvm/io.h>
#include <asm/io_apic.h>
+#include <asm/mem_paging.h>
#include <asm/setup.h>
const struct iommu_init_ops *__initdata iommu_init_ops;
*/
return d == dom_io ||
(likely(!mem_sharing_enabled(d)) &&
- likely(!vm_event_check_ring(d->vm_event_paging)) &&
+ likely(!mem_paging_enabled(d)) &&
likely(!p2m_get_hostp2m(d)->global_logdirty));
}
int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg);
+#ifdef CONFIG_MEM_PAGING
+# define mem_paging_enabled(d) vm_event_check_ring((d)->vm_event_paging)
+#else
+# define mem_paging_enabled(d) false
+#endif
+
#endif /*__ASM_X86_MEM_PAGING_H__ */
/*
#define P2M_PAGEABLE_TYPES (p2m_to_mask(p2m_ram_rw) \
| p2m_to_mask(p2m_ram_logdirty) )
+#ifdef CONFIG_MEM_PAGING
#define P2M_PAGING_TYPES (p2m_to_mask(p2m_ram_paging_out) \
| p2m_to_mask(p2m_ram_paged) \
| p2m_to_mask(p2m_ram_paging_in))
#define P2M_PAGED_TYPES (p2m_to_mask(p2m_ram_paged))
+#else
+#define P2M_PAGING_TYPES 0
+#define P2M_PAGED_TYPES 0
+#endif
/* Shared types */
/* XXX: Sharable types could include p2m_ram_ro too, but we would need to
struct domain *parent; /* VM fork parent */
#endif
/* Memory paging support */
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
struct vm_event_domain *vm_event_paging;
#endif
/* VM event monitor support */
}
#endif
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
static XSM_INLINE int xsm_mem_paging(XSM_DEFAULT_ARG struct domain *d)
{
XSM_ASSERT_ACTION(XSM_DM_PRIV);
int (*mem_access) (struct domain *d);
#endif
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
int (*mem_paging) (struct domain *d);
#endif
}
#endif
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
static inline int xsm_mem_paging (xsm_default_t def, struct domain *d)
{
return xsm_ops->mem_paging(d);
set_to_dummy_if_null(ops, mem_access);
#endif
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
set_to_dummy_if_null(ops, mem_paging);
#endif
}
#endif
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
static int flask_mem_paging(struct domain *d)
{
return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_PAGING);
.mem_access = flask_mem_access,
#endif
-#ifdef CONFIG_HAS_MEM_PAGING
+#ifdef CONFIG_MEM_PAGING
.mem_paging = flask_mem_paging,
#endif