{
struct xen_hvm_get_mem_access a;
struct domain *d;
- hvmmem_access_t access;
+ xenmem_access_t access;
if ( copy_from_guest(&a, arg, 1) )
return -EFAULT;
#include <xen/xmalloc.h>
#include <xen/efi.h>
#include <xen/grant_table.h>
+#include <xen/hypercall.h>
#include <asm/paging.h>
#include <asm/shadow.h>
#include <asm/page.h>
return rc;
}
-long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
+long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
int rc;
+ int op = cmd & MEMOP_CMD_MASK;
switch ( op )
{
}
default:
- return subarch_memory_op(op, arg);
+ return subarch_memory_op(cmd, arg);
}
return 0;
*/
+#include <xen/sched.h>
+#include <xen/guest_access.h>
+#include <xen/hypercall.h>
#include <asm/p2m.h>
#include <asm/mem_event.h>
+#include <xsm/xsm.h>
-int mem_access_memop(struct domain *d, xen_mem_event_op_t *meo)
+int mem_access_memop(unsigned long cmd,
+ XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
{
- int rc;
+ long rc;
+ xen_mem_access_op_t mao;
+ struct domain *d;
+ if ( copy_from_guest(&mao, arg, 1) )
+ return -EFAULT;
+
+ rc = rcu_lock_live_remote_domain_by_id(mao.domid, &d);
+ if ( rc )
+ return rc;
+
+ rc = -EINVAL;
+ if ( !is_hvm_domain(d) )
+ goto out;
+
+ rc = xsm_mem_event_op(XSM_TARGET, d, XENMEM_access_op);
+ if ( rc )
+ goto out;
+
+ rc = -ENODEV;
if ( unlikely(!d->mem_event->access.ring_page) )
- return -ENODEV;
+ goto out;
- switch( meo->op )
+ switch ( mao.op )
{
case XENMEM_access_op_resume:
- {
p2m_mem_access_resume(d);
rc = 0;
+ break;
+
+ case XENMEM_access_op_set_access:
+ {
+ unsigned long start_iter = cmd & ~MEMOP_CMD_MASK;
+
+ rc = -EINVAL;
+ if ( (mao.pfn != ~0ull) &&
+ (mao.nr < start_iter ||
+ ((mao.pfn + mao.nr - 1) < mao.pfn) ||
+ ((mao.pfn + mao.nr - 1) > domain_get_maximum_gpfn(d))) )
+ break;
+
+ rc = p2m_set_mem_access(d, mao.pfn, mao.nr, start_iter,
+ MEMOP_CMD_MASK, mao.access);
+ if ( rc > 0 )
+ {
+ ASSERT(!(rc & MEMOP_CMD_MASK));
+ rc = hypercall_create_continuation(__HYPERVISOR_memory_op, "lh",
+ XENMEM_access_op | rc, arg);
+ }
+ break;
+ }
+
+ case XENMEM_access_op_get_access:
+ {
+ xenmem_access_t access;
+
+ rc = -EINVAL;
+ if ( (mao.pfn > domain_get_maximum_gpfn(d)) && mao.pfn != ~0ull )
+ break;
+
+ rc = p2m_get_mem_access(d, mao.pfn, &access);
+ if ( rc != 0 )
+ break;
+
+ mao.access = access;
+ rc = __copy_field_to_guest(arg, &mao, access) ? -EFAULT : 0;
+
+ break;
}
- break;
default:
rc = -ENOSYS;
break;
}
+ out:
+ rcu_unlock_domain(d);
return rc;
}
case XENMEM_paging_op:
ret = mem_paging_memop(d, (xen_mem_event_op_t *) arg);
break;
- case XENMEM_access_op:
- ret = mem_access_memop(d, (xen_mem_event_op_t *) arg);
- break;
case XENMEM_sharing_op:
ret = mem_sharing_memop(d, (xen_mem_sharing_op_t *) arg);
break;
/* Set access type for a region of pfns.
* If start_pfn == -1ul, sets the default access type */
long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
- uint32_t start, uint32_t mask, hvmmem_access_t access)
+ uint32_t start, uint32_t mask, xenmem_access_t access)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
p2m_access_t a, _a;
long rc = 0;
static const p2m_access_t memaccess[] = {
-#define ACCESS(ac) [HVMMEM_access_##ac] = p2m_access_##ac
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
ACCESS(n),
ACCESS(r),
ACCESS(w),
case 0 ... ARRAY_SIZE(memaccess) - 1:
a = memaccess[access];
break;
- case HVMMEM_access_default:
+ case XENMEM_access_default:
a = p2m->default_access;
break;
default:
/* Get access type for a pfn
* If pfn == -1ul, gets the default access type */
int p2m_get_mem_access(struct domain *d, unsigned long pfn,
- hvmmem_access_t *access)
+ xenmem_access_t *access)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
p2m_type_t t;
p2m_access_t a;
mfn_t mfn;
- static const hvmmem_access_t memaccess[] = {
- HVMMEM_access_n,
- HVMMEM_access_r,
- HVMMEM_access_w,
- HVMMEM_access_rw,
- HVMMEM_access_x,
- HVMMEM_access_rx,
- HVMMEM_access_wx,
- HVMMEM_access_rwx,
- HVMMEM_access_rx2rw
+ static const xenmem_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = XENMEM_access_##ac
+ ACCESS(n),
+ ACCESS(r),
+ ACCESS(w),
+ ACCESS(rw),
+ ACCESS(x),
+ ACCESS(rx),
+ ACCESS(wx),
+ ACCESS(rwx),
+ ACCESS(rx2rw),
+ ACCESS(n2rwx),
+#undef ACCESS
};
/* If request to get default access */
#include <compat/xen.h>
#include <asm/mem_event.h>
#include <asm/mem_sharing.h>
+#include <asm/mem_access.h>
int compat_set_gdt(XEN_GUEST_HANDLE_PARAM(uint) frame_list, unsigned int entries)
{
desc_lo | ((u64)desc_hi << 32));
}
-int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
+int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
struct compat_machphys_mfn_list xmml;
l2_pgentry_t l2e;
compat_pfn_t mfn;
unsigned int i;
int rc = 0;
+ int op = cmd & MEMOP_CMD_MASK;
switch ( op )
{
XLAT_foreign_memory_map(nat, &cmp);
#undef XLAT_memory_map_HNDL_buffer
- rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
+ rc = arch_memory_op(cmd, guest_handle_from_ptr(nat, void));
break;
}
XLAT_memory_map(nat, &cmp);
#undef XLAT_memory_map_HNDL_buffer
- rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
+ rc = arch_memory_op(cmd, guest_handle_from_ptr(nat, void));
if ( rc < 0 )
break;
XLAT_pod_target(nat, &cmp);
- rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
+ rc = arch_memory_op(cmd, guest_handle_from_ptr(nat, void));
if ( rc < 0 )
break;
return mem_sharing_get_nr_shared_mfns();
case XENMEM_paging_op:
- case XENMEM_access_op:
{
xen_mem_event_op_t meo;
if ( copy_from_guest(&meo, arg, 1) )
return -EFAULT;
break;
}
+
+ case XENMEM_access_op:
+ rc = mem_access_memop(cmd, guest_handle_cast(arg, xen_mem_access_op_t));
+ break;
+
case XENMEM_sharing_op:
{
xen_mem_sharing_op_t mso;
#include <xen/numa.h>
#include <xen/nodemask.h>
#include <xen/guest_access.h>
+#include <xen/hypercall.h>
#include <asm/current.h>
#include <asm/asm_defns.h>
#include <asm/page.h>
#include <asm/numa.h>
#include <asm/mem_event.h>
#include <asm/mem_sharing.h>
+#include <asm/mem_access.h>
#include <public/memory.h>
/* Parameters for PFN/MADDR compression. */
}
}
-long subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
+long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
struct xen_machphys_mfn_list xmml;
l3_pgentry_t l3e;
xen_pfn_t mfn, last_mfn;
unsigned int i;
long rc = 0;
+ int op = cmd & MEMOP_CMD_MASK;
switch ( op )
{
return mem_sharing_get_nr_shared_mfns();
case XENMEM_paging_op:
- case XENMEM_access_op:
{
xen_mem_event_op_t meo;
if ( copy_from_guest(&meo, arg, 1) )
return -EFAULT;
break;
}
+
+ case XENMEM_access_op:
+ rc = mem_access_memop(cmd, guest_handle_cast(arg, xen_mem_access_op_t));
+ break;
+
case XENMEM_sharing_op:
{
xen_mem_sharing_op_t mso;
#undef compat_domid_t
#undef xen_domid_t
+CHECK_mem_access_op;
+
int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
{
int split, op = cmd & MEMOP_CMD_MASK;
break;
default:
- rc = arch_memory_op(op, arg);
+ rc = arch_memory_op(cmd, arg);
break;
}
#ifndef _XEN_ASM_MEM_ACCESS_H
#define _XEN_ASM_MEM_ACCESS_H
-int mem_access_memop(struct domain *d, xen_mem_event_op_t *meo);
+int mem_access_memop(unsigned long cmd,
+ XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
int mem_access_send_req(struct domain *d, mem_event_request_t *req);
#endif /* _XEN_ASM_MEM_ACCESS_H */
int __sync_local_execstate(void);
/* Arch-specific portion of memory_op hypercall. */
-long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg);
-long subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg);
-int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));
+long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
+long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
+int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void));
int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));
int steal_page(
/* Set access type for a region of pfns.
* If start_pfn == -1ul, sets the default access type */
long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_t nr,
- uint32_t start, uint32_t mask, hvmmem_access_t access);
+ uint32_t start, uint32_t mask, xenmem_access_t access);
/* Get access type for a pfn
* If pfn == -1ul, gets the default access type */
-int p2m_get_mem_access(struct domain *d, unsigned long pfn,
- hvmmem_access_t *access);
+int p2m_get_mem_access(struct domain *d, unsigned long pfn,
+ xenmem_access_t *access);
/*
* Internal functions, only called by other p2m code
#define XENMEM_paging_op_evict 1
#define XENMEM_paging_op_prep 2
-#define XENMEM_access_op 21
-#define XENMEM_access_op_resume 0
-
struct xen_mem_event_op {
uint8_t op; /* XENMEM_*_op_* */
domid_t domain;
typedef struct xen_mem_event_op xen_mem_event_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_event_op_t);
+#define XENMEM_access_op 21
+#define XENMEM_access_op_resume 0
+#define XENMEM_access_op_set_access 1
+#define XENMEM_access_op_get_access 2
+
+typedef enum {
+ XENMEM_access_n,
+ XENMEM_access_r,
+ XENMEM_access_w,
+ XENMEM_access_rw,
+ XENMEM_access_x,
+ XENMEM_access_rx,
+ XENMEM_access_wx,
+ XENMEM_access_rwx,
+ /*
+ * Page starts off as r-x, but automatically
+ * changes to r-w on a write
+ */
+ XENMEM_access_rx2rw,
+ /*
+ * Log access: starts off as n, automatically
+ * goes to rwx, generating an event without
+ * pausing the vcpu
+ */
+ XENMEM_access_n2rwx,
+ /* Take the domain default */
+ XENMEM_access_default
+} xenmem_access_t;
+
+struct xen_mem_access_op {
+ /* XENMEM_access_op_* */
+ uint8_t op;
+ /* xenmem_access_t */
+ uint8_t access;
+ domid_t domid;
+ /*
+ * Number of pages for the set op.
+ * Ignored when setting the default access and for other ops.
+ */
+ uint32_t nr;
+ /*
+ * First pfn for the set op, pfn for the get op.
+ * ~0ull is used to set and get the default access for pages.
+ */
+ uint64_aligned_t pfn;
+};
+typedef struct xen_mem_access_op xen_mem_access_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
+
#define XENMEM_sharing_op 22
#define XENMEM_sharing_op_nominate_gfn 0
#define XENMEM_sharing_op_nominate_gref 1
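For illustration, a minimal caller-side sketch of the two new sub-ops using the structure above; memory_op() here is an assumed stand-in for whatever wrapper the caller uses to issue __HYPERVISOR_memory_op (libxc, for instance, goes through its own hypercall buffers), not an interface added by this patch:
#include <stdint.h>
#include <xen/memory.h>          /* xen_mem_access_op_t, XENMEM_access_* */

extern long memory_op(unsigned int cmd, void *arg);   /* assumed wrapper */

/* Restrict a range of pfns to read/execute. */
static long set_range_rx(domid_t domid, uint64_t first_pfn, uint32_t nr)
{
    xen_mem_access_op_t mao = {
        .op     = XENMEM_access_op_set_access,
        .access = XENMEM_access_rx,
        .domid  = domid,
        .pfn    = first_pfn,     /* ~0ull would set the domain default */
        .nr     = nr,            /* ignored when setting the default   */
    };

    return memory_op(XENMEM_access_op, &mao);
}

/* Read back the access type of a single pfn. */
static long get_access(domid_t domid, uint64_t pfn, xenmem_access_t *access)
{
    xen_mem_access_op_t mao = {
        .op    = XENMEM_access_op_get_access,
        .domid = domid,
        .pfn   = pfn,            /* ~0ull reads the domain default */
    };
    long rc = memory_op(XENMEM_access_op, &mao);

    if ( !rc )
        *access = mao.access;

    return rc;
}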
! memory_exchange memory.h
! memory_map memory.h
! memory_reservation memory.h
+? mem_access_op memory.h
! pod_target memory.h
! remove_from_physmap memory.h
? physdev_eoi physdev.h