uint64_t vcpumap[XC_SR_MAX_VCPUS/64];
uint64_t identpt;
uint64_t paging_ring_pfn;
- uint64_t access_ring_pfn;
+ uint64_t monitor_ring_pfn;
uint64_t sharing_ring_pfn;
uint64_t vm86_tss;
uint64_t console_pfn;
// DPRINTF("paging ring pfn address: %llx\n", buf->paging_ring_pfn);
return pagebuf_get_one(xch, ctx, buf, fd, dom);
- case XC_SAVE_ID_HVM_ACCESS_RING_PFN:
+ case XC_SAVE_ID_HVM_MONITOR_RING_PFN:
/* Skip padding 4 bytes then read the mem access ring location. */
- if ( RDEXACT(fd, &buf->access_ring_pfn, sizeof(uint32_t)) ||
- RDEXACT(fd, &buf->access_ring_pfn, sizeof(uint64_t)) )
+ if ( RDEXACT(fd, &buf->monitor_ring_pfn, sizeof(uint32_t)) ||
+ RDEXACT(fd, &buf->monitor_ring_pfn, sizeof(uint64_t)) )
{
PERROR("error read the access ring pfn");
return -1;
}
- // DPRINTF("access ring pfn address: %llx\n", buf->access_ring_pfn);
+ // DPRINTF("monitor ring pfn address: %llx\n", buf->monitor_ring_pfn);
return pagebuf_get_one(xch, ctx, buf, fd, dom);
case XC_SAVE_ID_HVM_SHARING_RING_PFN:
xc_hvm_param_set(xch, dom, HVM_PARAM_IDENT_PT, pagebuf.identpt);
if ( pagebuf.paging_ring_pfn )
xc_hvm_param_set(xch, dom, HVM_PARAM_PAGING_RING_PFN, pagebuf.paging_ring_pfn);
- if ( pagebuf.access_ring_pfn )
- xc_hvm_param_set(xch, dom, HVM_PARAM_ACCESS_RING_PFN, pagebuf.access_ring_pfn);
+ if ( pagebuf.monitor_ring_pfn )
+ xc_hvm_param_set(xch, dom, HVM_PARAM_MONITOR_RING_PFN, pagebuf.monitor_ring_pfn);
if ( pagebuf.sharing_ring_pfn )
xc_hvm_param_set(xch, dom, HVM_PARAM_SHARING_RING_PFN, pagebuf.sharing_ring_pfn);
if ( pagebuf.vm86_tss )
goto out;
}
- chunk.id = XC_SAVE_ID_HVM_ACCESS_RING_PFN;
+ chunk.id = XC_SAVE_ID_HVM_MONITOR_RING_PFN;
chunk.data = 0;
- xc_hvm_param_get(xch, dom, HVM_PARAM_ACCESS_RING_PFN, &chunk.data);
+ xc_hvm_param_get(xch, dom, HVM_PARAM_MONITOR_RING_PFN, &chunk.data);
if ( (chunk.data != 0) &&
wrexact(io_fd, &chunk, sizeof(chunk)) )
special_pfn(SPECIALPAGE_CONSOLE));
xc_hvm_param_set(xch, dom, HVM_PARAM_PAGING_RING_PFN,
special_pfn(SPECIALPAGE_PAGING));
- xc_hvm_param_set(xch, dom, HVM_PARAM_ACCESS_RING_PFN,
+ xc_hvm_param_set(xch, dom, HVM_PARAM_MONITOR_RING_PFN,
special_pfn(SPECIALPAGE_ACCESS));
xc_hvm_param_set(xch, dom, HVM_PARAM_SHARING_RING_PFN,
special_pfn(SPECIALPAGE_SHARING));
void *xc_mem_access_enable(xc_interface *xch, domid_t domain_id, uint32_t *port)
{
- return xc_mem_event_enable(xch, domain_id, HVM_PARAM_ACCESS_RING_PFN,
+ return xc_mem_event_enable(xch, domain_id, HVM_PARAM_MONITOR_RING_PFN,
port, 0);
}
void *xc_mem_access_enable_introspection(xc_interface *xch, domid_t domain_id,
uint32_t *port)
{
- return xc_mem_event_enable(xch, domain_id, HVM_PARAM_ACCESS_RING_PFN,
+ return xc_mem_event_enable(xch, domain_id, HVM_PARAM_MONITOR_RING_PFN,
port, 1);
}
int xc_mem_access_disable(xc_interface *xch, domid_t domain_id)
{
return xc_mem_event_control(xch, domain_id,
- XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE,
- XEN_DOMCTL_MEM_EVENT_OP_ACCESS,
+ XEN_MEM_EVENT_MONITOR_DISABLE,
+ XEN_DOMCTL_MEM_EVENT_OP_MONITOR,
NULL);
}
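For reviewers, a minimal usage sketch of the renamed entry points, modelled loosely on tools/tests/xen-access: enable the monitor ring, bind its event channel, and initialise the consumer side of the shared page. This is illustrative only and not part of the patch; the helper name, error handling and includes are assumptions.

/*
 * Illustrative sketch (not part of this patch): toolstack-side setup for the
 * monitor ring.  monitor_setup() and its error handling are hypothetical.
 */
#include <xenctrl.h>
#include <xen/mem_event.h>

static mem_event_back_ring_t back_ring;

static void *monitor_setup(xc_interface *xch, xc_evtchn *xce,
                           domid_t domain_id, int *local_port)
{
    uint32_t remote_port = 0;
    void *ring_page;
    int port;

    /* Enables the ring behind HVM_PARAM_MONITOR_RING_PFN and maps it. */
    ring_page = xc_mem_access_enable(xch, domain_id, &remote_port);
    if ( ring_page == NULL )
        return NULL;

    /* Bind the event channel Xen allocated for this ring. */
    port = xc_evtchn_bind_interdomain(xce, domain_id, remote_port);
    if ( port < 0 )
    {
        xc_mem_access_disable(xch, domain_id);
        return NULL;
    }
    *local_port = port;

    /* Initialise the shared page and attach to it as the back (consumer) end. */
    SHARED_RING_INIT((mem_event_sring_t *)ring_page);
    BACK_RING_INIT(&back_ring, (mem_event_sring_t *)ring_page, XC_PAGE_SIZE);

    return ring_page;
}

Internally this still funnels through xc_mem_event_enable() with the renamed HVM_PARAM_MONITOR_RING_PFN, as the xc_mem_event_enable() hunk below shows.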
switch ( param )
{
case HVM_PARAM_PAGING_RING_PFN:
- op = XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE;
+ op = XEN_MEM_EVENT_PAGING_ENABLE;
mode = XEN_DOMCTL_MEM_EVENT_OP_PAGING;
break;
- case HVM_PARAM_ACCESS_RING_PFN:
+ case HVM_PARAM_MONITOR_RING_PFN:
if ( enable_introspection )
- op = XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE_INTROSPECTION;
+ op = XEN_MEM_EVENT_MONITOR_ENABLE_INTROSPECTION;
else
- op = XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE;
- mode = XEN_DOMCTL_MEM_EVENT_OP_ACCESS;
+ op = XEN_MEM_EVENT_MONITOR_ENABLE;
+ mode = XEN_DOMCTL_MEM_EVENT_OP_MONITOR;
break;
case HVM_PARAM_SHARING_RING_PFN:
- op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE;
+ op = XEN_MEM_EVENT_SHARING_ENABLE;
mode = XEN_DOMCTL_MEM_EVENT_OP_SHARING;
break;
}
return xc_mem_event_control(xch, domain_id,
- XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE,
+ XEN_MEM_EVENT_PAGING_ENABLE,
XEN_DOMCTL_MEM_EVENT_OP_PAGING,
port);
}
int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id)
{
return xc_mem_event_control(xch, domain_id,
- XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE,
+ XEN_MEM_EVENT_PAGING_DISABLE,
XEN_DOMCTL_MEM_EVENT_OP_PAGING,
NULL);
}
}
return xc_mem_event_control(xch, domid,
- XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE,
+ XEN_MEM_EVENT_SHARING_ENABLE,
XEN_DOMCTL_MEM_EVENT_OP_SHARING,
port);
}
domid_t domid)
{
return xc_mem_event_control(xch, domid,
- XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE,
+ XEN_MEM_EVENT_SHARING_DISABLE,
XEN_DOMCTL_MEM_EVENT_OP_SHARING,
NULL);
}
#define XC_SAVE_ID_HVM_GENERATION_ID_ADDR -14
/* Markers for the pfn's hosting these mem event rings */
#define XC_SAVE_ID_HVM_PAGING_RING_PFN -15
-#define XC_SAVE_ID_HVM_ACCESS_RING_PFN -16
+#define XC_SAVE_ID_HVM_MONITOR_RING_PFN -16
#define XC_SAVE_ID_HVM_SHARING_RING_PFN -17
#define XC_SAVE_ID_TOOLSTACK -18 /* Optional toolstack specific info */
/* These are a pair; it is an error for one to exist without the other */
return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops_no_write);
}
-void hvm_mem_event_emulate_one(bool_t nowrite, unsigned int trapnr,
+void hvm_mem_access_emulate_one(bool_t nowrite, unsigned int trapnr,
unsigned int errcode)
{
struct hvm_emulate_ctxt ctx = {{ 0 }};
if ( !(parameters & HVMPME_MODE_MASK) )
return 0;
- rc = mem_event_claim_slot(d, &d->mem_event->access);
+ rc = mem_event_claim_slot(d, &d->mem_event->monitor);
if ( rc == -ENOSYS )
{
/* If there was no ring to handle the event, then
}
hvm_mem_event_fill_regs(req);
- mem_event_put_request(d, &d->mem_event->access, req);
+ mem_event_put_request(d, &d->mem_event->monitor, req);
return 1;
}
return;
if ( unlikely(d->arch.hvm_domain.introspection_enabled) &&
- mem_event_check_ring(&d->mem_event->access) )
+ mem_event_check_ring(&d->mem_event->monitor) )
{
unsigned int i;
req->regs.x86.cs_arbytes = seg.attr.bytes;
}
-void p2m_mem_event_emulate_check(struct vcpu *v,
+void p2m_mem_access_emulate_check(struct vcpu *v,
const mem_event_response_t *rsp)
{
/* Mark vcpu for skipping one instruction upon rescheduling. */
gfn_unlock(p2m, gfn, 0);
/* Otherwise, check if there is a memory event listener, and send the message along */
- if ( !mem_event_check_ring(&d->mem_event->access) || !req_ptr )
+ if ( !mem_event_check_ring(&d->mem_event->monitor) || !req_ptr )
{
/* No listener */
if ( p2m->access_required )
if ( v->arch.mem_event.emulate_flags )
{
- hvm_mem_event_emulate_one((v->arch.mem_event.emulate_flags &
- MEM_ACCESS_EMULATE_NOWRITE) != 0,
- TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+ hvm_mem_access_emulate_one((v->arch.mem_event.emulate_flags &
+ MEM_ACCESS_EMULATE_NOWRITE) != 0,
+ TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
v->arch.mem_event.emulate_flags = 0;
return 1;
mem_event_response_t rsp;
/* Pull all responses off the ring. */
- while ( mem_event_get_response(d, &d->mem_event->access, &rsp) )
+ while ( mem_event_get_response(d, &d->mem_event->monitor, &rsp) )
{
struct vcpu *v;
v = d->vcpu[rsp.vcpu_id];
- p2m_mem_event_emulate_check(v, &rsp);
+ p2m_mem_access_emulate_check(v, &rsp);
/* Unpause domain. */
if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
goto out;
rc = -ENODEV;
- if ( unlikely(!d->mem_event->access.ring_page) )
+ if ( unlikely(!d->mem_event->monitor.ring_page) )
goto out;
switch ( mao.op )
int mem_access_send_req(struct domain *d, mem_event_request_t *req)
{
- int rc = mem_event_claim_slot(d, &d->mem_event->access);
+ int rc = mem_event_claim_slot(d, &d->mem_event->monitor);
if ( rc < 0 )
return rc;
- mem_event_put_request(d, &d->mem_event->access, req);
+ mem_event_put_request(d, &d->mem_event->monitor, req);
return 0;
}
/* Registered with Xen-bound event channel for incoming notifications. */
static void mem_access_notification(struct vcpu *v, unsigned int port)
{
- if ( likely(v->domain->mem_event->access.ring_page != NULL) )
+ if ( likely(v->domain->mem_event->monitor.ring_page != NULL) )
mem_access_resume(v->domain);
}
#endif
void mem_event_cleanup(struct domain *d)
{
#ifdef HAS_MEM_PAGING
- if ( d->mem_event->paging.ring_page ) {
+ if ( d->mem_event->paging.ring_page )
+ {
/* Destroying the wait queue head means waking up all
* queued vcpus. This will drain the list, allowing
* the disable routine to complete. It will also drop
}
#endif
#ifdef HAS_MEM_ACCESS
- if ( d->mem_event->access.ring_page ) {
- destroy_waitqueue_head(&d->mem_event->access.wq);
- (void)mem_event_disable(d, &d->mem_event->access);
+ if ( d->mem_event->monitor.ring_page )
+ {
+ destroy_waitqueue_head(&d->mem_event->monitor.wq);
+ (void)mem_event_disable(d, &d->mem_event->monitor);
}
#endif
#ifdef HAS_MEM_SHARING
- if ( d->mem_event->share.ring_page ) {
+ if ( d->mem_event->share.ring_page )
+ {
destroy_waitqueue_head(&d->mem_event->share.wq);
(void)mem_event_disable(d, &d->mem_event->share);
}
switch( mec->op )
{
- case XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE:
+ case XEN_MEM_EVENT_PAGING_ENABLE:
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
}
break;
- case XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE:
+ case XEN_MEM_EVENT_PAGING_DISABLE:
{
if ( med->ring_page )
rc = mem_event_disable(d, med);
#endif
#ifdef HAS_MEM_ACCESS
- case XEN_DOMCTL_MEM_EVENT_OP_ACCESS:
+ case XEN_DOMCTL_MEM_EVENT_OP_MONITOR:
{
- struct mem_event_domain *med = &d->mem_event->access;
+ struct mem_event_domain *med = &d->mem_event->monitor;
rc = -EINVAL;
switch( mec->op )
{
- case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE:
- case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE_INTROSPECTION:
+ case XEN_MEM_EVENT_MONITOR_ENABLE:
+ case XEN_MEM_EVENT_MONITOR_ENABLE_INTROSPECTION:
{
rc = -ENODEV;
if ( !p2m_mem_event_sanity_check(d) )
break;
rc = mem_event_enable(d, mec, med, _VPF_mem_access,
- HVM_PARAM_ACCESS_RING_PFN,
+ HVM_PARAM_MONITOR_RING_PFN,
mem_access_notification);
- if ( mec->op == XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE_INTROSPECTION
+ if ( mec->op == XEN_MEM_EVENT_MONITOR_ENABLE_INTROSPECTION
&& !rc )
p2m_setup_introspection(d);
}
break;
- case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE:
+ case XEN_MEM_EVENT_MONITOR_DISABLE:
{
if ( med->ring_page )
{
switch( mec->op )
{
- case XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE:
+ case XEN_MEM_EVENT_SHARING_ENABLE:
{
rc = -EOPNOTSUPP;
/* pvh fixme: p2m_is_foreign types need addressing */
}
break;
- case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE:
+ case XEN_MEM_EVENT_SHARING_DISABLE:
{
if ( med->ring_page )
rc = mem_event_disable(d, med);
} p2m_type_t;
static inline
-void p2m_mem_event_emulate_check(struct vcpu *v,
- const mem_event_response_t *rsp)
+void p2m_mem_access_emulate_check(struct vcpu *v,
+ const mem_event_response_t *rsp)
{
/* Not supported on ARM. */
-};
+}
static inline
void p2m_setup_introspection(struct domain *d)
struct hvm_emulate_ctxt *hvmemul_ctxt);
int hvm_emulate_one_no_write(
struct hvm_emulate_ctxt *hvmemul_ctxt);
-void hvm_mem_event_emulate_one(bool_t nowrite,
+void hvm_mem_access_emulate_one(bool_t nowrite,
unsigned int trapnr,
unsigned int errcode);
void hvm_emulate_prepare(
/* Check for emulation and mark vcpu for skipping one instruction
* upon rescheduling if required. */
-void p2m_mem_event_emulate_check(struct vcpu *v,
- const mem_event_response_t *rsp);
+void p2m_mem_access_emulate_check(struct vcpu *v,
+ const mem_event_response_t *rsp);
/* Enable arch specific introspection options (such as MSR interception). */
void p2m_setup_introspection(struct domain *d);
* pager<->hypervisor interface. Use XENMEM_paging_op*
* to perform per-page operations.
*
- * The XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE domctl returns several
+ * The XEN_MEM_EVENT_PAGING_ENABLE domctl returns several
* non-standard error codes to indicate why paging could not be enabled:
* ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
* EMLINK - guest has iommu passthrough enabled
*/
#define XEN_DOMCTL_MEM_EVENT_OP_PAGING 1
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE 0
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE 1
+#define XEN_MEM_EVENT_PAGING_ENABLE 0
+#define XEN_MEM_EVENT_PAGING_DISABLE 1
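As a reviewer aid, a hedged sketch (not part of this patch) of how a pager could surface the non-standard errno values documented above when enabling paging fails. It assumes the xc_mem_paging_enable() wrapper updated earlier in this diff returns an int and sets errno on failure, as the other libxc domctl wrappers do; the helper name is hypothetical.

/*
 * Illustrative only: map the errno values documented for
 * XEN_MEM_EVENT_PAGING_ENABLE to readable diagnostics.
 */
#include <errno.h>
#include <stdio.h>
#include <xenctrl.h>

static int paging_enable_verbose(xc_interface *xch, domid_t domain_id,
                                 uint32_t *port)
{
    int rc = xc_mem_paging_enable(xch, domain_id, port);

    if ( rc < 0 )
    {
        switch ( errno )
        {
        case ENODEV:
            fprintf(stderr, "host lacks HAP (EPT/NPT) or HAP is disabled\n");
            break;
        case EMLINK:
            fprintf(stderr, "guest has iommu passthrough enabled\n");
            break;
        default:
            fprintf(stderr, "xc_mem_paging_enable: errno %d\n", errno);
            break;
        }
    }

    return rc;
}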
/*
- * Access permissions.
+ * Monitor helper.
*
* As with paging, use the domctl for teardown/setup of the
* helper<->hypervisor interface.
*
- * There are HVM hypercalls to set the per-page access permissions of every
- * page in a domain. When one of these permissions--independent, read,
- * write, and execute--is violated, the VCPU is paused and a memory event
- * is sent with what happened. (See public/mem_event.h) .
+ * The monitor interface can be used to register for various VM events. For
+ * example, there are HVM hypercalls to set the per-page access permissions
+ * of every page in a domain. When one of these permissions--independent,
+ * read, write, and execute--is violated, the VCPU is paused and a memory event
+ * is sent with what happened. The memory event handler can then resume the
+ * VCPU and redo the access with a XENMEM_access_op_resume hypercall.
*
- * The memory event handler can then resume the VCPU and redo the access
- * with a XENMEM_access_op_resume hypercall.
+ * See public/mem_event.h for the list of available events that can be
+ * subscribed to via the monitor interface.
*
- * The XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE domctl returns several
+ * To enable MOV-TO-MSR interception on x86, this interface must be enabled
+ * with the XEN_MEM_EVENT_MONITOR_ENABLE_INTROSPECTION operation.
+ *
+ * The XEN_MEM_EVENT_MONITOR_ENABLE* domctls return several
* non-standard error codes to indicate why access could not be enabled:
* ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
* EBUSY - guest has or had access enabled, ring buffer still active
*/
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS 2
+#define XEN_DOMCTL_MEM_EVENT_OP_MONITOR 2
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE 0
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE 1
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE_INTROSPECTION 2
+#define XEN_MEM_EVENT_MONITOR_ENABLE 0
+#define XEN_MEM_EVENT_MONITOR_DISABLE 1
+#define XEN_MEM_EVENT_MONITOR_ENABLE_INTROSPECTION 2
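To make the resume path described in the comment above concrete, a hedged sketch (not part of this patch) of the toolstack consumer loop: drain requests from the monitor back ring, queue responses, and notify the event channel. That notification is what fires mem_access_notification() and in turn mem_access_resume() in the hunks earlier in this diff. The helper name and the ring/event-channel setup are assumed from the earlier sketch.

/*
 * Illustrative only: consume monitor requests and resume the paused vCPUs.
 * Assumes back_ring/xce/local_port were set up as sketched above.
 */
#include <string.h>
#include <xenctrl.h>
#include <xen/mem_event.h>

static void monitor_drain_ring(xc_evtchn *xce, int local_port,
                               mem_event_back_ring_t *ring)
{
    while ( RING_HAS_UNCONSUMED_REQUESTS(ring) )
    {
        mem_event_request_t req;
        mem_event_response_t rsp;
        RING_IDX idx = ring->req_cons;

        /* Pull one request off the shared page. */
        memcpy(&req, RING_GET_REQUEST(ring, idx), sizeof(req));
        ring->req_cons = ++idx;

        /* A real handler would apply policy here; echo back the essentials. */
        memset(&rsp, 0, sizeof(rsp));
        rsp.vcpu_id = req.vcpu_id;
        rsp.flags   = req.flags;   /* carries MEM_EVENT_FLAG_VCPU_PAUSED */
        rsp.gfn     = req.gfn;

        /* Queue the response on the same ring. */
        idx = ring->rsp_prod_pvt;
        memcpy(RING_GET_RESPONSE(ring, idx), &rsp, sizeof(rsp));
        ring->rsp_prod_pvt = ++idx;
        RING_PUSH_RESPONSES(ring);
    }

    /* Kick the Xen-bound event channel; this is what unpauses the vCPUs. */
    xc_evtchn_notify(xce, local_port);
}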
/*
* Sharing ENOMEM helper.
*/
#define XEN_DOMCTL_MEM_EVENT_OP_SHARING 3
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE 0
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE 1
+#define XEN_MEM_EVENT_SHARING_ENABLE 0
+#define XEN_MEM_EVENT_SHARING_DISABLE 1
/* Use for teardown/setup of helper<->hypervisor interface for paging,
* access and sharing.*/
struct xen_domctl_mem_event_op {
- uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_*_* */
+ uint32_t op; /* XEN_MEM_EVENT_*_* */
uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_OP_* */
uint32_t port; /* OUT: event channel for ring */
/* Params for the mem event rings */
#define HVM_PARAM_PAGING_RING_PFN 27
-#define HVM_PARAM_ACCESS_RING_PFN 28
+#define HVM_PARAM_MONITOR_RING_PFN 28
#define HVM_PARAM_SHARING_RING_PFN 29
/* SHUTDOWN_* action in case of a triple fault */
struct mem_event_domain share;
/* Memory paging support */
struct mem_event_domain paging;
- /* Memory access support */
- struct mem_event_domain access;
+ /* VM event monitor support */
+ struct mem_event_domain monitor;
};
struct evtchn_port_ops;