rc = -EPERM;
break;
case HVM_PARAM_MEMORY_EVENT_INT3:
- if ( d == current->domain )
+ case HVM_PARAM_MEMORY_EVENT_SINGLE_STEP:
+ if ( d == current->domain )
{
rc = -EPERM;
break;
switch( a.index )
{
case HVM_PARAM_MEMORY_EVENT_INT3:
+ case HVM_PARAM_MEMORY_EVENT_SINGLE_STEP:
{
domain_pause(d);
domain_unpause(d); /* Causes guest to latch new status */
rc = -ENOSYS;
if ( !cpu_has_monitor_trap_flag )
break;
- rc = 0;
- vcpu_pause(v);
+
+ rc = mem_event_check_ring(v->domain);
+ /* rc == 0: p2m_mem_access_check() has already paused the vcpu */
+ if ( rc < 0 )
+ vcpu_pause(v);
+
v->arch.hvm_vcpu.single_step =
(op == XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON);
- vcpu_unpause(v); /* guest will latch new state */
+
+ /* rc == 0: p2m_mem_access_resume() will unpause the vcpu */
+ if ( rc < 0 )
+ {
+ vcpu_unpause(v); /* guest will latch new state */
+ rc = 0;
+ }
break;
default:
rc = -ENOSYS;
MEM_EVENT_REASON_INT3,
gfn, 0, 1, gla);
}
+
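+/* Called from the MONITOR_TRAP_FLAG vmexit handler when the current vcpu
+ * completes a single step; gla is the guest RIP.  Returns -1 if no
+ * listener is registered. */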
+int hvm_memory_event_single_step(unsigned long gla)
+{
+ uint32_t pfec = PFEC_page_present;
+ unsigned long gfn;
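+ /* Translate the guest RIP (a linear address) to a gfn for the event. */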
+ gfn = paging_gva_to_gfn(current, gla, &pfec);
+
+ return hvm_memory_event_traps(current->domain->arch.hvm_domain
+ .params[HVM_PARAM_MEMORY_EVENT_SINGLE_STEP],
+ MEM_EVENT_REASON_SINGLESTEP,
+ gfn, 0, 1, gla);
+}
#endif /* __x86_64__ */
int nhvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
hvm_asid_flush_vcpu(v);
}
- debug_state = v->domain->debugger_attached
- || v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_INT3];
+ debug_state = v->domain->debugger_attached
+ || v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_INT3]
+ || v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_SINGLE_STEP];
if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
{
case EXIT_REASON_MONITOR_TRAP_FLAG:
v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
vmx_update_cpu_exec_control(v);
- if ( v->domain->debugger_attached && v->arch.hvm_vcpu.single_step )
- domain_pause_for_debugger();
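+ /* Notify the mem_event listener (if any) of the completed single step;
+  * an attached debugger still gets control as before. */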
+ if ( v->arch.hvm_vcpu.single_step )
+ {
+ hvm_memory_event_single_step(regs->eip);
+ if ( v->domain->debugger_attached )
+ domain_pause_for_debugger();
+ }
+
break;
case EXIT_REASON_PAUSE_INSTRUCTION:
void hvm_memory_event_cr4(unsigned long value, unsigned long old);
/* Called for current VCPU on int3: returns -1 if no listener */
int hvm_memory_event_int3(unsigned long gla);
+
+/* Called for current VCPU on single step: returns -1 if no listener */
+int hvm_memory_event_single_step(unsigned long gla);
+
#else
static inline void hvm_memory_event_cr0(unsigned long value, unsigned long old)
{ }
{ }
static inline int hvm_memory_event_int3(unsigned long gla)
{ return 0; }
+static inline int hvm_memory_event_single_step(unsigned long gla)
+{ return 0; }
#endif
/*
/* Enable blocking memory events, async or sync (pause vcpu until response)
* onchangeonly indicates messages only on a change of value */
-#define HVM_PARAM_MEMORY_EVENT_CR0 20
-#define HVM_PARAM_MEMORY_EVENT_CR3 21
-#define HVM_PARAM_MEMORY_EVENT_CR4 22
-#define HVM_PARAM_MEMORY_EVENT_INT3 23
+#define HVM_PARAM_MEMORY_EVENT_CR0         20
+#define HVM_PARAM_MEMORY_EVENT_CR3         21
+#define HVM_PARAM_MEMORY_EVENT_CR4         22
+#define HVM_PARAM_MEMORY_EVENT_INT3        23
+#define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25
#define HVMPME_MODE_MASK (3 << 0)
#define HVMPME_mode_disabled 0
/* Boolean: Enable nestedhvm (hvm only) */
#define HVM_PARAM_NESTEDHVM 24
-#define HVM_NR_PARAMS 25
+#define HVM_NR_PARAMS 26
#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
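As a usage sketch, assuming libxc and a mem_event ring already set up for the
target domain: a dom0 listener could enable these events with the existing
xc_set_hvm_param() call; the helper name below is illustrative only.

    #include <xenctrl.h>

    /* Illustrative helper: request synchronous single-step events for an HVM
     * domain.  HVMPME_mode_sync pauses the vcpu until the mem_event listener
     * responds through the ring (see the comment above these parameters). */
    static int enable_single_step_events(xc_interface *xch, domid_t domid)
    {
        return xc_set_hvm_param(xch, domid,
                                HVM_PARAM_MEMORY_EVENT_SINGLE_STEP,
                                HVMPME_mode_sync);
    }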
#define MEM_EVENT_REASON_CR3 3 /* CR3 was hit: gfn is CR3 value */
#define MEM_EVENT_REASON_CR4 4 /* CR4 was hit: gfn is CR4 value */
#define MEM_EVENT_REASON_INT3 5 /* int3 was hit: gla/gfn are RIP */
+#define MEM_EVENT_REASON_SINGLESTEP 6 /* single step was invoked: gla/gfn are RIP */
typedef struct mem_event_shared_page {
uint32_t port;