int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id, uint32_t *port);
int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id);
int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id,
- unsigned long gfn);
-int xc_mem_paging_evict(xc_interface *xch, domid_t domain_id, unsigned long gfn);
-int xc_mem_paging_prep(xc_interface *xch, domid_t domain_id, unsigned long gfn);
-int xc_mem_paging_load(xc_interface *xch, domid_t domain_id,
- unsigned long gfn, void *buffer);
+ uint64_t gfn);
+int xc_mem_paging_evict(xc_interface *xch, domid_t domain_id, uint64_t gfn);
+int xc_mem_paging_prep(xc_interface *xch, domid_t domain_id, uint64_t gfn);
+int xc_mem_paging_load(xc_interface *xch, domid_t domain_id,
+ uint64_t gfn, void *buffer);
/**
* Access tracking operations.
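The prototypes above now take a uint64_t gfn. For reference only (not part of this patch), a minimal pager sketch that drives these entry points might look like the following; the domain id, gfn, and error handling are illustrative, and a real pager would also map and service the paging mem_event ring before evicting or loading pages:

/* Hypothetical pager snippet built against the retyped API above. */
#include <stdio.h>
#include <stdlib.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    domid_t domid = 1;          /* assumed pre-existing guest */
    uint64_t gfn = 0x1000;      /* assumed pageable gfn */
    uint32_t port;
    void *page;

    if ( !xch || xc_mem_paging_enable(xch, domid, &port) )
        return 1;
    if ( posix_memalign(&page, XC_PAGE_SIZE, XC_PAGE_SIZE) )
        return 1;

    /* Page out: nominate the gfn, then evict its backing frame. */
    if ( xc_mem_paging_nominate(xch, domid, gfn) ||
         xc_mem_paging_evict(xch, domid, gfn) )
        fprintf(stderr, "page-out of gfn %#lx failed\n", (unsigned long)gfn);

    /* Page back in from our buffer (normally filled from the pagefile). */
    if ( xc_mem_paging_load(xch, domid, gfn, page) )
        fprintf(stderr, "page-in of gfn %#lx failed\n", (unsigned long)gfn);

    free(page);
    xc_mem_paging_disable(xch, domid);
    xc_interface_close(xch);
    return 0;
}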
return rc;
}
-int xc_mem_event_memop(xc_interface *xch, domid_t domain_id,
- unsigned int op, unsigned int mode,
- uint64_t gfn, void *buffer)
-{
- xen_mem_event_op_t meo;
-
- memset(&meo, 0, sizeof(meo));
-
- meo.op = op;
- meo.domain = domain_id;
- meo.gfn = gfn;
- meo.buffer = (unsigned long) buffer;
-
- return do_memory_op(xch, mode, &meo, sizeof(meo));
-}
-
void *xc_mem_event_enable(xc_interface *xch, domid_t domain_id, int param,
uint32_t *port, int enable_introspection)
{
#include "xc_private.h"
+static int xc_mem_paging_memop(xc_interface *xch, domid_t domain_id,
+ unsigned int op, uint64_t gfn, void *buffer)
+{
+ xen_mem_paging_op_t mpo;
+
+ memset(&mpo, 0, sizeof(mpo));
+
+ mpo.op = op;
+ mpo.domain = domain_id;
+ mpo.gfn = gfn;
+ mpo.buffer = (unsigned long) buffer;
+
+ return do_memory_op(xch, XENMEM_paging_op, &mpo, sizeof(mpo));
+}
int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id,
uint32_t *port)
errno = EINVAL;
return -1;
}
-
+
return xc_mem_event_control(xch, domain_id,
XEN_MEM_EVENT_PAGING_ENABLE,
XEN_DOMCTL_MEM_EVENT_OP_PAGING,
port);
}
-int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id, unsigned long gfn)
+int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id, uint64_t gfn)
{
- return xc_mem_event_memop(xch, domain_id,
- XENMEM_paging_op_nominate,
- XENMEM_paging_op,
- gfn, NULL);
+ return xc_mem_paging_memop(xch, domain_id,
+ XENMEM_paging_op_nominate,
+ gfn, NULL);
}
-int xc_mem_paging_evict(xc_interface *xch, domid_t domain_id, unsigned long gfn)
+int xc_mem_paging_evict(xc_interface *xch, domid_t domain_id, uint64_t gfn)
{
- return xc_mem_event_memop(xch, domain_id,
- XENMEM_paging_op_evict,
- XENMEM_paging_op,
- gfn, NULL);
+ return xc_mem_paging_memop(xch, domain_id,
+ XENMEM_paging_op_evict,
+ gfn, NULL);
}
-int xc_mem_paging_prep(xc_interface *xch, domid_t domain_id, unsigned long gfn)
+int xc_mem_paging_prep(xc_interface *xch, domid_t domain_id, uint64_t gfn)
{
- return xc_mem_event_memop(xch, domain_id,
- XENMEM_paging_op_prep,
- XENMEM_paging_op,
- gfn, NULL);
+ return xc_mem_paging_memop(xch, domain_id,
+ XENMEM_paging_op_prep,
+ gfn, NULL);
}
-int xc_mem_paging_load(xc_interface *xch, domid_t domain_id,
- unsigned long gfn, void *buffer)
+int xc_mem_paging_load(xc_interface *xch, domid_t domain_id,
+ uint64_t gfn, void *buffer)
{
int rc, old_errno;
if ( mlock(buffer, XC_PAGE_SIZE) )
return -1;
-
- rc = xc_mem_event_memop(xch, domain_id,
- XENMEM_paging_op_prep,
- XENMEM_paging_op,
- gfn, buffer);
+
+ rc = xc_mem_paging_memop(xch, domain_id,
+ XENMEM_paging_op_prep,
+ gfn, buffer);
old_errno = errno;
munlock(buffer, XC_PAGE_SIZE);
*/
int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
unsigned int mode, uint32_t *port);
-int xc_mem_event_memop(xc_interface *xch, domid_t domain_id,
- unsigned int op, unsigned int mode,
- uint64_t gfn, void *buffer);
/*
* Enables mem_event and returns the mapped ring page indicated by param.
* param can be HVM_PARAM_PAGING/ACCESS/SHARING_RING_PFN
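As a point of reference (not part of this patch), a libxc-internal caller could map the paging ring through the helper documented above roughly as follows; HVM_PARAM_PAGING_RING_PFN and the zero introspection flag are assumptions based on the comment:

/* Hypothetical libxc-internal sketch: map the paging ring and bind its
 * event channel; returns the mapped ring page on success, NULL on failure. */
#include "xc_private.h"
#include <xen/hvm/params.h>

static void *map_paging_ring(xc_interface *xch, domid_t domain_id,
                             uint32_t *port)
{
    return xc_mem_event_enable(xch, domain_id, HVM_PARAM_PAGING_RING_PFN,
                               port, 0);
}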
#include <xen/mem_event.h>
-int mem_paging_memop(struct domain *d, xen_mem_event_op_t *mec)
+int mem_paging_memop(struct domain *d, xen_mem_paging_op_t *mpo)
{
+ int rc = -ENODEV;
if ( unlikely(!d->mem_event->paging.ring_page) )
- return -ENODEV;
+ return rc;
- switch( mec->op )
+ switch( mpo->op )
{
case XENMEM_paging_op_nominate:
- {
- unsigned long gfn = mec->gfn;
- return p2m_mem_paging_nominate(d, gfn);
- }
- break;
+ rc = p2m_mem_paging_nominate(d, mpo->gfn);
+ break;
case XENMEM_paging_op_evict:
- {
- unsigned long gfn = mec->gfn;
- return p2m_mem_paging_evict(d, gfn);
- }
- break;
+ rc = p2m_mem_paging_evict(d, mpo->gfn);
+ break;
case XENMEM_paging_op_prep:
- {
- unsigned long gfn = mec->gfn;
- return p2m_mem_paging_prep(d, gfn, mec->buffer);
- }
- break;
+ rc = p2m_mem_paging_prep(d, mpo->gfn, mpo->buffer);
+ break;
default:
- return -ENOSYS;
+ rc = -ENOSYS;
break;
}
+
+ return rc;
}
case XENMEM_paging_op:
{
- xen_mem_event_op_t meo;
- if ( copy_from_guest(&meo, arg, 1) )
+ xen_mem_paging_op_t mpo;
+
+ if ( copy_from_guest(&mpo, arg, 1) )
return -EFAULT;
- rc = do_mem_event_op(cmd, meo.domain, &meo);
- if ( !rc && __copy_to_guest(arg, &meo, 1) )
+ rc = do_mem_event_op(cmd, mpo.domain, &mpo);
+ if ( !rc && __copy_to_guest(arg, &mpo, 1) )
return -EFAULT;
break;
}
case XENMEM_sharing_op:
{
xen_mem_sharing_op_t mso;
+
if ( copy_from_guest(&mso, arg, 1) )
return -EFAULT;
if ( mso.op == XENMEM_sharing_op_audit )
case XENMEM_paging_op:
{
- xen_mem_event_op_t meo;
- if ( copy_from_guest(&meo, arg, 1) )
+ xen_mem_paging_op_t mpo;
+ if ( copy_from_guest(&mpo, arg, 1) )
return -EFAULT;
- rc = do_mem_event_op(cmd, meo.domain, &meo);
- if ( !rc && __copy_to_guest(arg, &meo, 1) )
+ rc = do_mem_event_op(cmd, mpo.domain, &mpo);
+ if ( !rc && __copy_to_guest(arg, &mpo, 1) )
return -EFAULT;
break;
}
{
#ifdef HAS_MEM_PAGING
case XENMEM_paging_op:
- ret = mem_paging_memop(d, (xen_mem_event_op_t *) arg);
+ ret = mem_paging_memop(d, arg);
break;
#endif
#ifdef HAS_MEM_SHARING
case XENMEM_sharing_op:
- ret = mem_sharing_memop(d, (xen_mem_sharing_op_t *) arg);
+ ret = mem_sharing_memop(d, arg);
break;
#endif
default:
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#ifndef __ASM_X86_MEM_PAGING_H__
+#define __ASM_X86_MEM_PAGING_H__
-int mem_paging_memop(struct domain *d, xen_mem_event_op_t *meo);
+int mem_paging_memop(struct domain *d, xen_mem_paging_op_t *mpo);
+#endif /* __ASM_X86_MEM_PAGING_H__ */
/*
* Local variables:
#define XENMEM_paging_op_evict 1
#define XENMEM_paging_op_prep 2
-struct xen_mem_event_op {
- uint8_t op; /* XENMEM_*_op_* */
+struct xen_mem_paging_op {
+ uint8_t op; /* XENMEM_paging_op_* */
domid_t domain;
-
/* PAGING_PREP IN: buffer to immediately fill page in */
uint64_aligned_t buffer;
/* Other OPs */
uint64_aligned_t gfn; /* IN: gfn of page being operated on */
};
-typedef struct xen_mem_event_op xen_mem_event_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_mem_event_op_t);
+typedef struct xen_mem_paging_op xen_mem_paging_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
#define XENMEM_access_op 21
#define XENMEM_access_op_resume 0