mem_event: use different ringbuffers for share, paging and access
author     Olaf Hering <olaf@aepfle.de>
           Fri, 16 Sep 2011 11:19:26 +0000 (12:19 +0100)
committer  Olaf Hering <olaf@aepfle.de>
           Fri, 16 Sep 2011 11:19:26 +0000 (12:19 +0100)
Up to now a single ring buffer was used for mem_share, xenpaging and
xen-access.  Each helper would have had to cooperate and pull only its
own requests from the ring.  Unfortunately this was never implemented,
and even if it had been, the whole concept would remain fragile because
a crash or early exit of one helper would stall the others.

What happened up to now is that, with xenpaging and memory sharing both
active, mem_sharing requests would be pushed into the ring buffer, and
xenpaging is not prepared to handle such requests.

This patch creates an independent ring buffer each for mem_share,
xenpaging and xen-access, and also adds new functions to enable
xenpaging and xen-access.  The xc_mem_event_enable/xc_mem_event_disable
functions are removed, and the various XEN_DOMCTL_MEM_EVENT_* macros
are cleaned up.  Since this removal changes the libxc API, the SONAME
is bumped as well.

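For illustration only (not part of the patch): a minimal sketch of how a
paging helper is expected to use the new per-type calls, modelled on what
xenpaging does after this change.  The helper function below is
hypothetical, the page size is assumed to be 4096, and error handling is
trimmed.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <xenctrl.h>

    /* Assumed x86 page size; xenpaging has its own PAGE_SIZE definition. */
    #define MEV_PAGE_SIZE 4096

    static int enable_paging_ring(xc_interface *xch, domid_t domain_id,
                                  void **shared_page, void **ring_page)
    {
        int rc;

        /* Ring and shared pages live in the helper's address space and are
         * passed to Xen by virtual address, so keep them page-aligned and
         * locked in memory. */
        if ( posix_memalign(shared_page, MEV_PAGE_SIZE, MEV_PAGE_SIZE) ||
             posix_memalign(ring_page, MEV_PAGE_SIZE, MEV_PAGE_SIZE) )
            return -1;
        memset(*shared_page, 0, MEV_PAGE_SIZE);
        memset(*ring_page, 0, MEV_PAGE_SIZE);
        mlock(*shared_page, MEV_PAGE_SIZE);
        mlock(*ring_page, MEV_PAGE_SIZE);

        /* Previously: xc_mem_event_enable(xch, domain_id, shared, ring);
         * now each consumer enables its own ring. */
        rc = xc_mem_paging_enable(xch, domain_id, *shared_page, *ring_page);
        if ( rc != 0 )
            fprintf(stderr, "xc_mem_paging_enable() failed: %d\n", rc);
        return rc;
    }

Teardown correspondingly becomes xc_mem_paging_disable(xch, domain_id),
and xen-access does the analogous thing with
xc_mem_access_enable()/xc_mem_access_disable().
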
Signed-off-by: Olaf Hering <olaf@aepfle.de>
Acked-by: Tim Deegan <tim@xen.org>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Tim Deegan <tim@xen.org>
16 files changed:
tools/libxc/Makefile
tools/libxc/xc_mem_access.c
tools/libxc/xc_mem_event.c
tools/libxc/xc_mem_paging.c
tools/libxc/xc_memshr.c
tools/libxc/xenctrl.h
tools/tests/xen-access/xen-access.c
tools/xenpaging/xenpaging.c
xen/arch/ia64/xen/dom0_ops.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/mm/mem_event.c
xen/arch/x86/mm/mem_paging.c
xen/arch/x86/mm/mem_sharing.c
xen/arch/x86/mm/p2m.c
xen/include/public/domctl.h
xen/include/xen/sched.h

index 6b00abab50f8ee9cb63c5fd7eeb46655b6de7e87..29b47243c02efc244e9caad34f6c7077b88faf07 100644 (file)
@@ -1,7 +1,7 @@
 XEN_ROOT = $(CURDIR)/../..
 include $(XEN_ROOT)/tools/Rules.mk
 
-MAJOR    = 4.0
+MAJOR    = 4.2
 MINOR    = 0
 
 CTRL_SRCS-y       :=
index b9e66bbd070bac23a675d288c7a602fd2a88f1b7..66068785676be4b1eaabd6dc3a990341a8e8e291 100644 (file)
 #include "xc_private.h"
 
 
+int xc_mem_access_enable(xc_interface *xch, domid_t domain_id,
+                        void *shared_page, void *ring_page)
+{
+    return xc_mem_event_control(xch, domain_id,
+                                XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE,
+                                XEN_DOMCTL_MEM_EVENT_OP_ACCESS,
+                                shared_page, ring_page, INVALID_MFN);
+}
+
+int xc_mem_access_disable(xc_interface *xch, domid_t domain_id)
+{
+    return xc_mem_event_control(xch, domain_id,
+                                XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE,
+                                XEN_DOMCTL_MEM_EVENT_OP_ACCESS,
+                                NULL, NULL, INVALID_MFN);
+}
+
 int xc_mem_access_resume(xc_interface *xch, domid_t domain_id, unsigned long gfn)
 {
     return xc_mem_event_control(xch, domain_id,
                                 XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME,
-                                XEN_DOMCTL_MEM_EVENT_OP_ACCESS, NULL, NULL,
-                                gfn);
+                                XEN_DOMCTL_MEM_EVENT_OP_ACCESS,
+                                NULL, NULL, gfn);
 }
 
 /*
index ef8d4caa20099fa4eece2044cf6c5596c66dc33f..b40d2697093b12c43344874d3e376730efe95772 100644 (file)
@@ -42,18 +42,3 @@ int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
     return do_domctl(xch, &domctl);
 }
 
-int xc_mem_event_enable(xc_interface *xch, domid_t domain_id,
-                        void *shared_page, void *ring_page)
-{
-    return xc_mem_event_control(xch, domain_id,
-                                XEN_DOMCTL_MEM_EVENT_OP_ENABLE, 0,
-                                shared_page, ring_page, INVALID_MFN);
-}
-
-int xc_mem_event_disable(xc_interface *xch, domid_t domain_id)
-{
-    return xc_mem_event_control(xch, domain_id,
-                                XEN_DOMCTL_MEM_EVENT_OP_DISABLE, 0,
-                                NULL, NULL, INVALID_MFN);
-}
-
index db5be452f50e2c200fcb3e428cef781d86020cfc..f917403b3940a800f3871f2b421d99faf6e98c61 100644 (file)
 #include "xc_private.h"
 
 
+int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id,
+                        void *shared_page, void *ring_page)
+{
+    return xc_mem_event_control(xch, domain_id,
+                                XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE,
+                                XEN_DOMCTL_MEM_EVENT_OP_PAGING,
+                                shared_page, ring_page, INVALID_MFN);
+}
+
+int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id)
+{
+    return xc_mem_event_control(xch, domain_id,
+                                XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE,
+                                XEN_DOMCTL_MEM_EVENT_OP_PAGING,
+                                NULL, NULL, INVALID_MFN);
+}
+
 int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id, unsigned long gfn)
 {
     return xc_mem_event_control(xch, domain_id,
                                 XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE,
-                                XEN_DOMCTL_MEM_EVENT_OP_PAGING, NULL, NULL,
-                                gfn);
+                                XEN_DOMCTL_MEM_EVENT_OP_PAGING,
+                                NULL, NULL, gfn);
 }
 
 int xc_mem_paging_evict(xc_interface *xch, domid_t domain_id, unsigned long gfn)
 {
     return xc_mem_event_control(xch, domain_id,
                                 XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT,
-                                XEN_DOMCTL_MEM_EVENT_OP_PAGING, NULL, NULL,
-                                gfn);
+                                XEN_DOMCTL_MEM_EVENT_OP_PAGING,
+                                NULL, NULL, gfn);
 }
 
 int xc_mem_paging_prep(xc_interface *xch, domid_t domain_id, unsigned long gfn)
 {
     return xc_mem_event_control(xch, domain_id,
                                 XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP,
-                                XEN_DOMCTL_MEM_EVENT_OP_PAGING, NULL, NULL,
-                                gfn);
+                                XEN_DOMCTL_MEM_EVENT_OP_PAGING,
+                                NULL, NULL, gfn);
 }
 
 int xc_mem_paging_resume(xc_interface *xch, domid_t domain_id, unsigned long gfn)
 {
     return xc_mem_event_control(xch, domain_id,
                                 XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME,
-                                XEN_DOMCTL_MEM_EVENT_OP_PAGING, NULL, NULL,
-                                gfn);
+                                XEN_DOMCTL_MEM_EVENT_OP_PAGING,
+                                NULL, NULL, gfn);
 }
 
 
index 8e5faf4bb423bc86a151091cdb2d4a478fee3243..eaa9d2771e04f0e8e5e726c9af5351f65ca1e9f6 100644 (file)
@@ -36,7 +36,7 @@ int xc_memshr_control(xc_interface *xch,
     domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
     domctl.domain = (domid_t)domid;
     op = &(domctl.u.mem_sharing_op);
-    op->op = XEN_DOMCTL_MEM_SHARING_OP_CONTROL;
+    op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_CONTROL;
     op->u.enable = enable;
 
     return do_domctl(xch, &domctl);
@@ -55,7 +55,7 @@ int xc_memshr_nominate_gfn(xc_interface *xch,
     domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
     domctl.domain = (domid_t)domid;
     op = &(domctl.u.mem_sharing_op);
-    op->op = XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN;
+    op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GFN;
     op->u.nominate.u.gfn = gfn;
 
     ret = do_domctl(xch, &domctl);
@@ -77,7 +77,7 @@ int xc_memshr_nominate_gref(xc_interface *xch,
     domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
     domctl.domain = (domid_t)domid;
     op = &(domctl.u.mem_sharing_op);
-    op->op = XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF;
+    op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GREF;
     op->u.nominate.u.grant_ref = gref;
 
     ret = do_domctl(xch, &domctl);
@@ -97,7 +97,7 @@ int xc_memshr_share(xc_interface *xch,
     domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
     domctl.domain = 0;
     op = &(domctl.u.mem_sharing_op);
-    op->op = XEN_DOMCTL_MEM_SHARING_OP_SHARE;
+    op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_SHARE;
     op->u.share.source_handle = source_handle;
     op->u.share.client_handle = client_handle;
 
@@ -114,7 +114,7 @@ int xc_memshr_domain_resume(xc_interface *xch,
     domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
     domctl.domain = (domid_t)domid;
     op = &(domctl.u.mem_sharing_op);
-    op->op = XEN_DOMCTL_MEM_SHARING_OP_RESUME;
+    op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_RESUME;
 
     return do_domctl(xch, &domctl);
 }
@@ -130,7 +130,7 @@ int xc_memshr_debug_gfn(xc_interface *xch,
     domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
     domctl.domain = (domid_t)domid;
     op = &(domctl.u.mem_sharing_op);
-    op->op = XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN;
+    op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GFN;
     op->u.debug.u.gfn = gfn;
 
     return do_domctl(xch, &domctl);
@@ -147,7 +147,7 @@ int xc_memshr_debug_mfn(xc_interface *xch,
     domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
     domctl.domain = (domid_t)domid;
     op = &(domctl.u.mem_sharing_op);
-    op->op = XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN;
+    op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_MFN;
     op->u.debug.u.mfn = mfn;
 
     return do_domctl(xch, &domctl);
@@ -164,7 +164,7 @@ int xc_memshr_debug_gref(xc_interface *xch,
     domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
     domctl.domain = (domid_t)domid;
     op = &(domctl.u.mem_sharing_op);
-    op->op = XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF;
+    op->op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GREF;
     op->u.debug.u.gref = gref;
 
     return do_domctl(xch, &domctl);
index 1b82ee0818c7e03597032d7b427fb444a9f2945a..4bcb9bd6c9d6135d8c987b824f3d11e5b11d3d68 100644 (file)
@@ -1758,16 +1758,19 @@ int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
                          unsigned int mode, void *shared_page,
                           void *ring_page, unsigned long gfn);
 
-int xc_mem_event_enable(xc_interface *xch, domid_t domain_id,
+int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id,
                         void *shared_page, void *ring_page);
-int xc_mem_event_disable(xc_interface *xch, domid_t domain_id);
-
+int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id);
 int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id,
                            unsigned long gfn);
 int xc_mem_paging_evict(xc_interface *xch, domid_t domain_id, unsigned long gfn);
 int xc_mem_paging_prep(xc_interface *xch, domid_t domain_id, unsigned long gfn);
 int xc_mem_paging_resume(xc_interface *xch, domid_t domain_id,
                          unsigned long gfn);
+
+int xc_mem_access_enable(xc_interface *xch, domid_t domain_id,
+                        void *shared_page, void *ring_page);
+int xc_mem_access_disable(xc_interface *xch, domid_t domain_id);
 int xc_mem_access_resume(xc_interface *xch, domid_t domain_id,
                          unsigned long gfn);
 
index 2b3265e2fee0ae1b50132fb82fb225f0a6066494..4394f8f20302cf432c5d28c6ccf0d08da1873887 100644 (file)
@@ -241,7 +241,7 @@ xenaccess_t *xenaccess_init(xc_interface **xch_r, domid_t domain_id)
     mem_event_ring_lock_init(&xenaccess->mem_event);
 
     /* Initialise Xen */
-    rc = xc_mem_event_enable(xenaccess->xc_handle, xenaccess->mem_event.domain_id,
+    rc = xc_mem_access_enable(xenaccess->xc_handle, xenaccess->mem_event.domain_id,
                              xenaccess->mem_event.shared_page,
                              xenaccess->mem_event.ring_page);
     if ( rc != 0 )
@@ -351,7 +351,7 @@ int xenaccess_teardown(xc_interface *xch, xenaccess_t *xenaccess)
         return 0;
 
     /* Tear down domain xenaccess in Xen */
-    rc = xc_mem_event_disable(xenaccess->xc_handle, xenaccess->mem_event.domain_id);
+    rc = xc_mem_access_disable(xenaccess->xc_handle, xenaccess->mem_event.domain_id);
     if ( rc != 0 )
     {
         ERROR("Error tearing down domain xenaccess in xen");
index ebdba23db2192f74740ab36689c1a959d0c7e3d7..e2cbc614d0d7edd51bf8300ceb7470b44e55e006 100644 (file)
@@ -234,7 +234,7 @@ static xenpaging_t *xenpaging_init(domid_t domain_id, int num_pages)
                    PAGE_SIZE);
     
     /* Initialise Xen */
-    rc = xc_mem_event_enable(xch, paging->mem_event.domain_id,
+    rc = xc_mem_paging_enable(xch, paging->mem_event.domain_id,
                              paging->mem_event.shared_page,
                              paging->mem_event.ring_page);
     if ( rc != 0 )
@@ -353,7 +353,7 @@ static int xenpaging_teardown(xenpaging_t *paging)
     xch = paging->xc_handle;
     paging->xc_handle = NULL;
     /* Tear down domain paging in Xen */
-    rc = xc_mem_event_disable(xch, paging->mem_event.domain_id);
+    rc = xc_mem_paging_disable(xch, paging->mem_event.domain_id);
     if ( rc != 0 )
     {
         ERROR("Error tearing down domain paging in xen");
index 7eb446623aff8f6df47424bab342b725d8f3e0f8..f6ba0e3e031206ad370db9d94c2332ea3344cf02 100644 (file)
@@ -688,7 +688,7 @@ long arch_do_domctl(xen_domctl_t *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
 
         switch(mec->op)
         {
-            case XEN_DOMCTL_MEM_SHARING_OP_CONTROL:
+            case XEN_DOMCTL_MEM_EVENT_OP_SHARING_CONTROL:
             {
                 if (mec->u.enable) {
                     ret = -EINVAL; /* not implemented */
index 7f1372fde775c44a1da6f9300813e676e1f488f8..bce2df8d0adad1da3fb4c3d332ba9d86f2545d78 100644 (file)
@@ -4025,7 +4025,7 @@ static int hvm_memory_event_traps(long p, uint32_t reason,
     if ( (p & HVMPME_onchangeonly) && (value == old) )
         return 1;
     
-    rc = mem_event_check_ring(d, &d->mem_event);
+    rc = mem_event_check_ring(d, &d->mem_access);
     if ( rc )
         return rc;
     
@@ -4048,7 +4048,7 @@ static int hvm_memory_event_traps(long p, uint32_t reason,
         req.gla_valid = 1;
     }
     
-    mem_event_put_request(d, &d->mem_event, &req);
+    mem_event_put_request(d, &d->mem_access, &req);
     
     return 1;
 }
index 05d5e0658da2aad7be6a566c3e4597f27e6588ab..c6c864dbd5535ad12ba5ede2a64eb0dd383acfbd 100644 (file)
 #define mem_event_ring_lock(_med)       spin_lock(&(_med)->ring_lock)
 #define mem_event_ring_unlock(_med)     spin_unlock(&(_med)->ring_lock)
 
-static int mem_event_enable(struct domain *d, struct mem_event_domain *med, mfn_t ring_mfn, mfn_t shared_mfn)
+static int mem_event_enable(struct domain *d,
+                            xen_domctl_mem_event_op_t *mec,
+                            struct mem_event_domain *med)
 {
     int rc;
+    struct domain *dom_mem_event = current->domain;
+    struct vcpu *v = current;
+    unsigned long ring_addr = mec->ring_addr;
+    unsigned long shared_addr = mec->shared_addr;
+    l1_pgentry_t l1e;
+    unsigned long gfn;
+    p2m_type_t p2mt;
+    mfn_t ring_mfn;
+    mfn_t shared_mfn;
+
+    /* Only one helper at a time. If the helper crashed,
+     * the ring is in an undefined state and so is the guest.
+     */
+    if ( med->ring_page )
+        return -EBUSY;
+
+    /* Get MFN of ring page */
+    guest_get_eff_l1e(v, ring_addr, &l1e);
+    gfn = l1e_get_pfn(l1e);
+    ring_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
+
+    if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) )
+        return -EINVAL;
+
+    /* Get MFN of shared page */
+    guest_get_eff_l1e(v, shared_addr, &l1e);
+    gfn = l1e_get_pfn(l1e);
+    shared_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
+
+    if ( unlikely(!mfn_valid(mfn_x(shared_mfn))) )
+        return -EINVAL;
 
     /* Map ring and shared pages */
     med->ring_page = map_domain_page(mfn_x(ring_mfn));
-    if ( med->ring_page == NULL )
-        goto err;
-
     med->shared_page = map_domain_page(mfn_x(shared_mfn));
-    if ( med->shared_page == NULL )
-        goto err_ring;
 
     /* Allocate event channel */
     rc = alloc_unbound_xen_event_channel(d->vcpu[0],
                                          current->domain->domain_id);
     if ( rc < 0 )
-        goto err_shared;
+        goto err;
 
     ((mem_event_shared_page_t *)med->shared_page)->port = rc;
     med->xen_port = rc;
@@ -71,14 +99,14 @@ static int mem_event_enable(struct domain *d, struct mem_event_domain *med, mfn_
 
     return 0;
 
- err_shared:
+ err:
     unmap_domain_page(med->shared_page);
     med->shared_page = NULL;
- err_ring:
+
     unmap_domain_page(med->ring_page);
     med->ring_page = NULL;
- err:
-    return 1;
+
+    return rc;
 }
 
 static int mem_event_disable(struct mem_event_domain *med)
@@ -220,86 +248,79 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
 
     rc = -ENOSYS;
 
-    switch ( mec-> mode ) 
+    switch ( mec->mode )
     {
-    case 0:
+    case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
     {
+        struct mem_event_domain *med = &d->mem_paging;
+        rc = -ENODEV;
+        /* Only HAP is supported */
+        if ( !hap_enabled(d) )
+            break;
+
+        /* Currently only EPT is supported */
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
+            break;
+
         switch( mec->op )
         {
-        case XEN_DOMCTL_MEM_EVENT_OP_ENABLE:
+        case XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE:
         {
-            struct domain *dom_mem_event = current->domain;
-            struct vcpu *v = current;
-            struct mem_event_domain *med = &d->mem_event;
-            unsigned long ring_addr = mec->ring_addr;
-            unsigned long shared_addr = mec->shared_addr;
-            l1_pgentry_t l1e;
-            unsigned long gfn;
-            p2m_type_t p2mt;
-            mfn_t ring_mfn;
-            mfn_t shared_mfn;
-
-            /* Only one xenpaging at a time. If xenpaging crashed,
-             * the cache is in an undefined state and so is the guest
-             */
-            rc = -EBUSY;
-            if ( med->ring_page )
-                break;
-
-            /* Currently only EPT is supported */
-            rc = -ENODEV;
-            if ( !(hap_enabled(d) &&
-                  (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) )
-                break;
-
-            /* Get MFN of ring page */
-            guest_get_eff_l1e(v, ring_addr, &l1e);
-            gfn = l1e_get_pfn(l1e);
-            ring_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
+            rc = mem_event_enable(d, mec, med);
+        }
+        break;
 
-            rc = -EINVAL;
-            if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) )
-                break;
+        case XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE:
+        {
+            rc = mem_event_disable(med);
+        }
+        break;
 
-            /* Get MFN of shared page */
-            guest_get_eff_l1e(v, shared_addr, &l1e);
-            gfn = l1e_get_pfn(l1e);
-            shared_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
+        default:
+        {
+            if ( med->ring_page )
+                rc = mem_paging_domctl(d, mec, u_domctl);
+        }
+        break;
+        }
+    }
+    break;
 
-            rc = -EINVAL;
-            if ( unlikely(!mfn_valid(mfn_x(shared_mfn))) )
-                break;
+    case XEN_DOMCTL_MEM_EVENT_OP_ACCESS: 
+    {
+        struct mem_event_domain *med = &d->mem_access;
+        rc = -ENODEV;
+        /* Only HAP is supported */
+        if ( !hap_enabled(d) )
+            break;
 
-            rc = -EINVAL;
-            if ( mem_event_enable(d, med, ring_mfn, shared_mfn) != 0 )
-                break;
+        /* Currently only EPT is supported */
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
+            break;
 
-            rc = 0;
+        switch( mec->op )
+        {
+        case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE:
+        {
+            rc = mem_event_enable(d, mec, med);
         }
         break;
 
-        case XEN_DOMCTL_MEM_EVENT_OP_DISABLE:
+        case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE:
         {
-            rc = mem_event_disable(&d->mem_event);
+            rc = mem_event_disable(&d->mem_access);
         }
         break;
 
         default:
-            rc = -ENOSYS;
-            break;
+        {
+            if ( med->ring_page )
+                rc = mem_access_domctl(d, mec, u_domctl);
         }
         break;
+        }
     }
-    case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
-    {
-        rc = mem_paging_domctl(d, mec, u_domctl);
-        break;
-    }
-    case XEN_DOMCTL_MEM_EVENT_OP_ACCESS: 
-    {
-        rc = mem_access_domctl(d, mec, u_domctl);
-        break;
-    }
+    break;
     }
 
     return rc;
index 55b36f915509c93974b01f363f754b0cff84a593..5c020951639dff6d6cff2f7f38a3e40dcee51e9a 100644 (file)
 int mem_paging_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
                       XEN_GUEST_HANDLE(void) u_domctl)
 {
-    /* Only HAP is supported */
-    if ( !hap_enabled(d) )
-         return -ENODEV;
-
     switch( mec->op )
     {
     case XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE:
index dd4825471ed2b4b439af28b4c401616128339d1f..c33795dc37fadb0a374a8300300806d2b6d0a38c 100644 (file)
@@ -281,12 +281,12 @@ static struct page_info* mem_sharing_alloc_page(struct domain *d,
     vcpu_pause_nosync(v);
     req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
 
-    if(mem_event_check_ring(d, &d->mem_event)) return page;
+    if(mem_event_check_ring(d, &d->mem_share)) return page;
 
     req.gfn = gfn;
     req.p2mt = p2m_ram_shared;
     req.vcpu_id = v->vcpu_id;
-    mem_event_put_request(d, &d->mem_event, &req);
+    mem_event_put_request(d, &d->mem_share, &req);
 
     return page;
 }
@@ -301,7 +301,7 @@ int mem_sharing_sharing_resume(struct domain *d)
     mem_event_response_t rsp;
 
     /* Get request off the ring */
-    mem_event_get_response(&d->mem_event, &rsp);
+    mem_event_get_response(&d->mem_share, &rsp);
 
     /* Unpause domain/vcpu */
     if( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
@@ -697,14 +697,14 @@ int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec)
 
     switch(mec->op)
     {
-        case XEN_DOMCTL_MEM_SHARING_OP_CONTROL:
+        case XEN_DOMCTL_MEM_EVENT_OP_SHARING_CONTROL:
         {
             d->arch.hvm_domain.mem_sharing_enabled = mec->u.enable;
             rc = 0;
         }
         break;
 
-        case XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN:
+        case XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GFN:
         {
             unsigned long gfn = mec->u.nominate.u.gfn;
             shr_handle_t handle;
@@ -715,7 +715,7 @@ int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec)
         }
         break;
 
-        case XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF:
+        case XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GREF:
         {
             grant_ref_t gref = mec->u.nominate.u.grant_ref;
             unsigned long gfn;
@@ -730,7 +730,7 @@ int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec)
         }
         break;
 
-        case XEN_DOMCTL_MEM_SHARING_OP_SHARE:
+        case XEN_DOMCTL_MEM_EVENT_OP_SHARING_SHARE:
         {
             shr_handle_t sh = mec->u.share.source_handle;
             shr_handle_t ch = mec->u.share.client_handle;
@@ -738,7 +738,7 @@ int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec)
         }
         break;
 
-        case XEN_DOMCTL_MEM_SHARING_OP_RESUME:
+        case XEN_DOMCTL_MEM_EVENT_OP_SHARING_RESUME:
         {
             if(!mem_sharing_enabled(d))
                 return -EINVAL;
@@ -746,21 +746,21 @@ int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec)
         }
         break;
 
-        case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN:
+        case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GFN:
         {
             unsigned long gfn = mec->u.debug.u.gfn;
             rc = mem_sharing_debug_gfn(d, gfn);
         }
         break;
 
-        case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN:
+        case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_MFN:
         {
             unsigned long mfn = mec->u.debug.u.mfn;
             rc = mem_sharing_debug_mfn(mfn);
         }
         break;
 
-        case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF:
+        case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GREF:
         {
             grant_ref_t gref = mec->u.debug.u.gref;
             rc = mem_sharing_debug_gref(d, gref);
index d8f622ffa3fba71fc9d453f8465a0707a8415fec..c62aa12da7eea69ffea3b97870d8e6c1214110ab 100644 (file)
@@ -755,7 +755,7 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn)
     mem_event_request_t req;
 
     /* Check that there's space on the ring for this request */
-    if ( mem_event_check_ring(d, &d->mem_event) == 0)
+    if ( mem_event_check_ring(d, &d->mem_paging) == 0)
     {
         /* Send release notification to pager */
         memset(&req, 0, sizeof(req));
@@ -763,7 +763,7 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn)
         req.gfn = gfn;
         req.vcpu_id = v->vcpu_id;
 
-        mem_event_put_request(d, &d->mem_event, &req);
+        mem_event_put_request(d, &d->mem_paging, &req);
     }
 }
 
@@ -775,7 +775,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     /* Check that there's space on the ring for this request */
-    if ( mem_event_check_ring(d, &d->mem_event) )
+    if ( mem_event_check_ring(d, &d->mem_paging) )
         return;
 
     memset(&req, 0, sizeof(req));
@@ -803,7 +803,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
     else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
     {
         /* gfn is already on its way back and vcpu is not paused */
-        mem_event_put_req_producers(&d->mem_event);
+        mem_event_put_req_producers(&d->mem_paging);
         return;
     }
 
@@ -812,7 +812,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
     req.p2mt = p2mt;
     req.vcpu_id = v->vcpu_id;
 
-    mem_event_put_request(d, &d->mem_event, &req);
+    mem_event_put_request(d, &d->mem_paging, &req);
 }
 
 int p2m_mem_paging_prep(struct domain *d, unsigned long gfn)
@@ -842,7 +842,7 @@ void p2m_mem_paging_resume(struct domain *d)
     mfn_t mfn;
 
     /* Pull the response off the ring */
-    mem_event_get_response(&d->mem_event, &rsp);
+    mem_event_get_response(&d->mem_paging, &rsp);
 
     /* Fix p2m entry if the page was not dropped */
     if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
@@ -889,7 +889,7 @@ void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla
     p2m_unlock(p2m);
 
     /* Otherwise, check if there is a memory event listener, and send the message along */
-    res = mem_event_check_ring(d, &d->mem_event);
+    res = mem_event_check_ring(d, &d->mem_access);
     if ( res < 0 ) 
     {
         /* No listener */
@@ -933,7 +933,7 @@ void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla
     
     req.vcpu_id = v->vcpu_id;
 
-    mem_event_put_request(d, &d->mem_event, &req);
+    mem_event_put_request(d, &d->mem_access, &req);
 
     /* VCPU paused, mem event request sent */
 }
@@ -943,7 +943,7 @@ void p2m_mem_access_resume(struct p2m_domain *p2m)
     struct domain *d = p2m->domain;
     mem_event_response_t rsp;
 
-    mem_event_get_response(&d->mem_event, &rsp);
+    mem_event_get_response(&d->mem_access, &rsp);
 
     /* Unpause domain */
     if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
index c0dbc0234eab9aa3150fa8328b9b939066c68e80..84f236d563deb55928b09dfe27758ba76e0edd0b 100644 (file)
@@ -708,20 +708,18 @@ struct xen_domctl_gdbsx_domstatus {
 
 /* XEN_DOMCTL_mem_event_op */
 
-/* Add and remove memory handlers */
-#define XEN_DOMCTL_MEM_EVENT_OP_ENABLE     0
-#define XEN_DOMCTL_MEM_EVENT_OP_DISABLE    1
-
 /*
+* Domain memory paging
  * Page memory in and out. 
  */
 #define XEN_DOMCTL_MEM_EVENT_OP_PAGING            1
 
-/* Domain memory paging */
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE   0
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT      1
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP       2
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME     3
+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE     0
+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE    1
+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE   2
+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT      3
+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP       4
+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME     5
 
 /*
  * Access permissions.
@@ -734,11 +732,14 @@ struct xen_domctl_gdbsx_domstatus {
  * ACCESS_RESUME mode for the following domctl.
  */
 #define XEN_DOMCTL_MEM_EVENT_OP_ACCESS            2
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME     0 
+
+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE     0
+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE    1
+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME     2
 
 struct xen_domctl_mem_event_op {
-    uint32_t       op;           /* XEN_DOMCTL_MEM_EVENT_OP_* */
-    uint32_t       mode;         /* XEN_DOMCTL_MEM_EVENT_ENABLE_* */
+    uint32_t       op;           /* XEN_DOMCTL_MEM_EVENT_OP_*_* */
+    uint32_t       mode;         /* XEN_DOMCTL_MEM_EVENT_OP_* */
 
     /* OP_ENABLE */
     uint64_aligned_t shared_addr;  /* IN:  Virtual address of shared page */
@@ -755,14 +756,16 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t);
  */
 /* XEN_DOMCTL_mem_sharing_op */
 
-#define XEN_DOMCTL_MEM_SHARING_OP_CONTROL        0
-#define XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN   1
-#define XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF  2
-#define XEN_DOMCTL_MEM_SHARING_OP_SHARE          3
-#define XEN_DOMCTL_MEM_SHARING_OP_RESUME         4
-#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN      5
-#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN      6
-#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF     7
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING                3
+
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_CONTROL        0
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GFN   1
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GREF  2
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_SHARE          3
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_RESUME         4
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GFN      5
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_MFN      6
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GREF     7
 
 #define XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID  (-10)
 #define XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID  (-9)
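As an aside, a hedged sketch of how the reworked constants pair up when a
domctl is filled in by hand: 'mode' selects the ring (paging, access or
sharing) and 'op' the sub-operation within it.  This is the plumbing that
xc_mem_event_control() already performs; the domctl.u.mem_event_op field
path is assumed from the struct xen_domctl_mem_event_op definition above,
and the function name below is purely illustrative.

    #include <string.h>
    #include "xc_private.h"   /* libxc-internal: do_domctl() and domctl types */

    static int paging_enable_raw(xc_interface *xch, domid_t domain_id,
                                 void *shared_page, void *ring_page)
    {
        xen_domctl_t domctl;

        memset(&domctl, 0, sizeof(domctl));
        domctl.cmd = XEN_DOMCTL_mem_event_op;
        domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
        domctl.domain = domain_id;

        /* 'mode' picks the ring, 'op' the sub-operation for that ring. */
        domctl.u.mem_event_op.mode = XEN_DOMCTL_MEM_EVENT_OP_PAGING;
        domctl.u.mem_event_op.op   = XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE;
        domctl.u.mem_event_op.shared_addr = (unsigned long)shared_page;
        domctl.u.mem_event_op.ring_addr   = (unsigned long)ring_page;
        domctl.u.mem_event_op.gfn         = INVALID_MFN;

        return do_domctl(xch, &domctl);
    }
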
index 2bda1edd7baa01c4bbfab9f8fa5287a7da63a1f0..c6b3a5916fa3d29b8fe3c287dd1581139f0be6cb 100644 (file)
@@ -317,8 +317,12 @@ struct domain
     /* Non-migratable and non-restoreable? */
     bool_t disable_migrate;
 
+    /* Memory sharing support */
+    struct mem_event_domain mem_share;
     /* Memory paging support */
-    struct mem_event_domain mem_event;
+    struct mem_event_domain mem_paging;
+    /* Memory access support */
+    struct mem_event_domain mem_access;
 
     /* Currently computed from union of all vcpu cpu-affinity masks. */
     nodemask_t node_affinity;