if ( (p & HVMPME_onchangeonly) && (value == old) )
return 1;
- rc = mem_event_check_ring(d, &d->mem_access);
+ rc = mem_event_check_ring(d, &d->mem_event->access);
if ( rc )
return rc;
req.gla_valid = 1;
}
- mem_event_put_request(d, &d->mem_access, &req);
+ mem_event_put_request(d, &d->mem_event->access, &req);
return 1;
}
{
case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
{
- struct mem_event_domain *med = &d->mem_paging;
+ struct mem_event_domain *med = &d->mem_event->paging;
rc = -EINVAL;
switch( mec->op )
case XEN_DOMCTL_MEM_EVENT_OP_ACCESS:
{
- struct mem_event_domain *med = &d->mem_access;
+ struct mem_event_domain *med = &d->mem_event->access;
rc = -EINVAL;
switch( mec->op )
case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE:
{
if ( med->ring_page )
- rc = mem_event_disable(&d->mem_access);
+ rc = mem_event_disable(med);
}
break;
vcpu_pause_nosync(v);
req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
- if(mem_event_check_ring(d, &d->mem_share)) return page;
+ if ( mem_event_check_ring(d, &d->mem_event->share) )
+ return page;
req.gfn = gfn;
req.p2mt = p2m_ram_shared;
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &d->mem_share, &req);
+ mem_event_put_request(d, &d->mem_event->share, &req);
return page;
}
mem_event_response_t rsp;
/* Get request off the ring */
- mem_event_get_response(&d->mem_share, &rsp);
+ mem_event_get_response(&d->mem_event->share, &rsp);
/* Unpause domain/vcpu */
if( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
mem_event_request_t req;
/* Check that there's space on the ring for this request */
- if ( mem_event_check_ring(d, &d->mem_paging) == 0)
+ if ( mem_event_check_ring(d, &d->mem_event->paging) == 0 )
{
/* Send release notification to pager */
memset(&req, 0, sizeof(req));
req.gfn = gfn;
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &d->mem_paging, &req);
+ mem_event_put_request(d, &d->mem_event->paging, &req);
}
}
struct p2m_domain *p2m = p2m_get_hostp2m(d);
/* Check that there's space on the ring for this request */
- if ( mem_event_check_ring(d, &d->mem_paging) )
+ if ( mem_event_check_ring(d, &d->mem_event->paging) )
return;
memset(&req, 0, sizeof(req));
else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
{
/* gfn is already on its way back and vcpu is not paused */
- mem_event_put_req_producers(&d->mem_paging);
+ mem_event_put_req_producers(&d->mem_event->paging);
return;
}
req.p2mt = p2mt;
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &d->mem_paging, &req);
+ mem_event_put_request(d, &d->mem_event->paging, &req);
}
/**
mfn_t mfn;
/* Pull the response off the ring */
- mem_event_get_response(&d->mem_paging, &rsp);
+ mem_event_get_response(&d->mem_event->paging, &rsp);
/* Fix p2m entry if the page was not dropped */
if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
p2m_unlock(p2m);
/* Otherwise, check if there is a memory event listener, and send the message along */
- res = mem_event_check_ring(d, &d->mem_access);
+ res = mem_event_check_ring(d, &d->mem_event->access);
if ( res < 0 )
{
/* No listener */
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &d->mem_access, &req);
+ mem_event_put_request(d, &d->mem_event->access, &req);
/* VCPU paused, mem event request sent */
}
struct domain *d = p2m->domain;
mem_event_response_t rsp;
- mem_event_get_response(&d->mem_access, &rsp);
+ mem_event_get_response(&d->mem_event->access, &rsp);
/* Unpause domain */
if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
init_status |= INIT_gnttab;
poolid = 0;
+
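+ /* Allocate the per-domain mem_event state up front; it is freed on the fail path below. */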
+ d->mem_event = xzalloc(struct mem_event_per_domain);
+ if ( !d->mem_event )
+ goto fail;
}
if ( arch_domain_create(d, domcr_flags) != 0 )
fail:
d->is_dying = DOMDYING_dead;
atomic_set(&d->refcnt, DOMAIN_DESTROYED);
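+ /* xfree() is NULL-safe, so this is correct even if the allocation above failed. */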
+ xfree(d->mem_event);
if ( init_status & INIT_arch )
arch_domain_destroy(d);
if ( init_status & INIT_gnttab )
int xen_port;
};
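+/*
+ * Per-domain grouping of the mem_event rings that previously lived directly
+ * in struct domain; allocated in domain_create().
+ */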
+struct mem_event_per_domain
+{
+ /* Memory sharing support */
+ struct mem_event_domain share;
+ /* Memory paging support */
+ struct mem_event_domain paging;
+ /* Memory access support */
+ struct mem_event_domain access;
+};
+
struct domain
{
domid_t domain_id;
/* Non-migratable and non-restoreable? */
bool_t disable_migrate;
- /* Memory sharing support */
- struct mem_event_domain mem_share;
- /* Memory paging support */
- struct mem_event_domain mem_paging;
- /* Memory access support */
- struct mem_event_domain mem_access;
+ /* Rings for the various mem_event listeners (paging, access, sharing) */
+ struct mem_event_per_domain *mem_event;
/* Currently computed from union of all vcpu cpu-affinity masks. */
nodemask_t node_affinity;