void *xc_mem_access_enable(xc_interface *xch, domid_t domain_id, uint32_t *port)
{
- return xc_mem_event_enable(xch, domain_id, HVM_PARAM_ACCESS_RING_PFN, port);
+ return xc_mem_event_enable(xch, domain_id, HVM_PARAM_ACCESS_RING_PFN,
+ port, 0);
+}
+
+void *xc_mem_access_enable_introspection(xc_interface *xch, domid_t domain_id,
+ uint32_t *port)
+{
+ return xc_mem_event_enable(xch, domain_id, HVM_PARAM_ACCESS_RING_PFN,
+ port, 1);
}
int xc_mem_access_disable(xc_interface *xch, domid_t domain_id)
{
void *xc_mem_event_enable(xc_interface *xch, domid_t domain_id, int param,
- uint32_t *port)
+ uint32_t *port, int enable_introspection)
{
void *ring_page = NULL;
uint64_t pfn;
break;
case HVM_PARAM_ACCESS_RING_PFN:
- op = XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE;
+ if ( enable_introspection )
+ op = XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE_INTROSPECTION;
+ else
+ op = XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE;
mode = XEN_DOMCTL_MEM_EVENT_OP_ACCESS;
break;
* param can be HVM_PARAM_PAGING/ACCESS/SHARING_RING_PFN
*/
void *xc_mem_event_enable(xc_interface *xch, domid_t domain_id, int param,
- uint32_t *port);
+ uint32_t *port, int enable_introspection);
#endif /* __XC_PRIVATE_H__ */
* Caller has to unmap this page when done.
*/
void *xc_mem_access_enable(xc_interface *xch, domid_t domain_id, uint32_t *port);
+void *xc_mem_access_enable_introspection(xc_interface *xch, domid_t domain_id,
+ uint32_t *port);
int xc_mem_access_disable(xc_interface *xch, domid_t domain_id);
int xc_mem_access_resume(xc_interface *xch, domid_t domain_id);
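For reference, a minimal consumer-side sketch (not part of this patch) of the new libxc call, assuming an already-open xc_interface handle xch and an HVM guest domid running on HAP-capable hardware; event-channel binding and event handling are elided:

    #include <sys/mman.h>
    #include <xenctrl.h>

    uint32_t port;
    void *ring_page;

    /* Map the access ring page, requesting the introspection variant of the op. */
    ring_page = xc_mem_access_enable_introspection(xch, domid, &port);
    if ( ring_page == NULL )
    {
        /* errno carries the reason, e.g. ENODEV (no HAP) or EBUSY (ring still active). */
        perror("xc_mem_access_enable_introspection");
        return -1;
    }

    /* ... bind an event channel to 'port' and consume mem_event requests ... */

    /* The caller owns the mapping: unmap it and disable access events when done. */
    munmap(ring_page, XC_PAGE_SIZE);
    xc_mem_access_disable(xch, domid);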
#include <xen/keyhandler.h>
#include <asm/shadow.h>
#include <asm/tboot.h>
+#include <asm/mem_event.h>
static bool_t __read_mostly opt_vpid_enabled = 1;
boolean_param("vpid", opt_vpid_enabled);
u32 vmx_vmentry_control __read_mostly;
u64 vmx_ept_vpid_cap __read_mostly;
+const u32 vmx_introspection_force_enabled_msrs[] = {
+ MSR_IA32_SYSENTER_EIP,
+ MSR_IA32_SYSENTER_ESP,
+ MSR_IA32_SYSENTER_CS,
+ MSR_IA32_MC0_CTL,
+ MSR_STAR,
+ MSR_LSTAR
+};
+
+const unsigned int vmx_introspection_force_enabled_msrs_size =
+ ARRAY_SIZE(vmx_introspection_force_enabled_msrs);
+
static DEFINE_PER_CPU_READ_MOSTLY(struct vmcs_struct *, vmxon_region);
static DEFINE_PER_CPU(struct vmcs_struct *, current_vmcs);
static DEFINE_PER_CPU(struct list_head, active_vmcs_list);
void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type)
{
unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
+ struct domain *d = v->domain;
/* VMX MSR bitmap supported? */
if ( msr_bitmap == NULL )
return;
+ if ( unlikely(d->arch.hvm_domain.introspection_enabled) &&
+ mem_event_check_ring(&d->mem_event->access) )
+ {
+ unsigned int i;
+
+ /* Filter out MSR-s needed for memory introspection */
+ for ( i = 0; i < vmx_introspection_force_enabled_msrs_size; i++ )
+ if ( msr == vmx_introspection_force_enabled_msrs[i] )
+ return;
+ }
+
/*
* See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
* have the write-low and read-high bitmap offsets the wrong way round.
*eax |= XEN_HVM_CPUID_X2APIC_VIRT;
}
+static void vmx_enable_msr_exit_interception(struct domain *d)
+{
+ struct vcpu *v;
+ unsigned int i;
+
+ /* Enable interception for MSRs needed for memory introspection. */
+ for_each_vcpu ( d, v )
+ for ( i = 0; i < vmx_introspection_force_enabled_msrs_size; i++ )
+ vmx_enable_intercept_for_msr(v, vmx_introspection_force_enabled_msrs[i],
+ MSR_TYPE_W);
+}
+
static struct hvm_function_table __initdata vmx_function_table = {
.name = "VMX",
.cpu_up_prepare = vmx_cpu_up_prepare,
.handle_eoi = vmx_handle_eoi,
.nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
.hypervisor_cpuid_leaf = vmx_hypervisor_cpuid_leaf,
+ .enable_msr_exit_interception = vmx_enable_msr_exit_interception,
};
const struct hvm_function_table * __init start_vmx(void)
switch( mec->op )
{
case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE:
+ case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE_INTROSPECTION:
{
rc = -ENODEV;
/* Only HAP is supported */
rc = mem_event_enable(d, mec, med, _VPF_mem_access,
HVM_PARAM_ACCESS_RING_PFN,
mem_access_notification);
+
+ if ( mec->op != XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE &&
+ rc == 0 && hvm_funcs.enable_msr_exit_interception )
+ {
+ d->arch.hvm_domain.introspection_enabled = 1;
+ hvm_funcs.enable_msr_exit_interception(d);
+ }
}
break;
case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE:
{
if ( med->ring_page )
+ {
rc = mem_event_disable(d, med);
+ d->arch.hvm_domain.introspection_enabled = 0;
+ }
}
break;
bool_t mem_sharing_enabled;
bool_t qemu_mapcache_invalidate;
bool_t is_s3_suspended;
+ bool_t introspection_enabled;
/*
* TSC value that VCPUs use to calculate their tsc_offset value.
void (*hypervisor_cpuid_leaf)(uint32_t sub_idx,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx);
+
+ void (*enable_msr_exit_interception)(struct domain *d);
};
extern struct hvm_function_table hvm_funcs;
HOST_RIP = 0x00006c16,
};
+/*
+ * A set of MSR-s that need to be enabled for memory introspection
+ * to work.
+ */
+extern const u32 vmx_introspection_force_enabled_msrs[];
+extern const unsigned int vmx_introspection_force_enabled_msrs_size;
+
#define VMCS_VPID_WIDTH 16
#define MSR_TYPE_R 1
* ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
* EBUSY - guest has or had access enabled, ring buffer still active
*/
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS 2
+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS 2
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE 0
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE 1
+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE 0
+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE 1
+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE_INTROSPECTION 2
/*
* Sharing ENOMEM helper.
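Once the ring page is mapped as in the sketch above, it is driven the same way as the existing xc_mem_access_enable() path (compare tools/tests/xen-access); a rough sketch, assuming the standard mem_event ring types and macros are available from the public headers:

    #include <xenctrl.h>
    #include <xen/mem_event.h>   /* mem_event_sring_t, mem_event_back_ring_t */

    mem_event_back_ring_t back_ring;

    /* The consumer acts as the back end of the ring: initialise it over the
     * freshly mapped page before pulling any requests off it. */
    SHARED_RING_INIT((mem_event_sring_t *)ring_page);
    BACK_RING_INIT(&back_ring, (mem_event_sring_t *)ring_page, XC_PAGE_SIZE);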