}
+int xc_domain_debug_control(int xc, uint32_t domid, uint32_t sop, uint32_t vcpu)
+{
+ DECLARE_DOMCTL;
+
+ memset(&domctl, 0, sizeof(domctl));
+ domctl.domain = (domid_t)domid;
+ domctl.cmd = XEN_DOMCTL_debug_op;
+ domctl.u.debug_op.op = sop;
+ domctl.u.debug_op.vcpu = vcpu;
+
+ return do_domctl(xc, &domctl);
+}
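(For illustration only: a debugger front-end might drive the new wrapper as in
the hypothetical sketch below, where xc_handle, domid and vcpu are assumed to
come from the caller's state; the fall-back mirrors the xc_ptrace.c change
further down.)

    /* Hypothetical sketch: prefer MTF single-stepping, fall back to TF. */
    if ( xc_domain_debug_control(xc_handle, domid,
                                 XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON, vcpu) )
    {
        /* No MTF (PV guest or older hardware): set PSL_T in the vcpu
         * context via xc_vcpu_setcontext() instead. */
    }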
+
+
/*
* Local variables:
* mode: C
/* XXX we can still have problems if the user switches threads
 * during single-stepping - but doing that makes little sense anyway
*/
- ctxt[cpu].c.user_regs.eflags |= PSL_T;
- if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
- &ctxt[cpu])))
- goto out_error_domctl;
+ /* Try to enable the Monitor Trap Flag for HVM guests, and fall back
+ * to the EFLAGS trap flag (TF) if there is no MTF support.
+ */
+ if ( !current_is_hvm ||
+ xc_domain_debug_control(xc_handle,
+ current_domid,
+ XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON,
+ cpu) )
+ {
+ ctxt[cpu].c.user_regs.eflags |= PSL_T;
+ if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
+ &ctxt[cpu])))
+ goto out_error_domctl;
+ }
/* FALLTHROUGH */
case PTRACE_CONT:
{
FOREACH_CPU(cpumap, index) {
cpu = index - 1;
- if (fetch_regs(xc_handle, cpu, NULL))
- goto out_error;
- /* Clear trace flag */
- if ( ctxt[cpu].c.user_regs.eflags & PSL_T )
+ if ( !current_is_hvm ||
+ xc_domain_debug_control(xc_handle,
+ current_domid,
+ XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF,
+ cpu) )
{
- ctxt[cpu].c.user_regs.eflags &= ~PSL_T;
- if ((retval = xc_vcpu_setcontext(xc_handle, current_domid,
- cpu, &ctxt[cpu])))
- goto out_error_domctl;
+ if (fetch_regs(xc_handle, cpu, NULL))
+ goto out_error;
+ /* Clear trace flag */
+ if ( ctxt[cpu].c.user_regs.eflags & PSL_T )
+ {
+ ctxt[cpu].c.user_regs.eflags &= ~PSL_T;
+ if ((retval = xc_vcpu_setcontext(xc_handle, current_domid,
+ cpu, &ctxt[cpu])))
+ goto out_error_domctl;
+ }
}
}
}
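(Note: in both hunks above, the TF path is now taken only for PV guests
(!current_is_hvm) or when the debug_op hypercall fails, e.g. on hosts without
MTF, so existing setups keep the previous TF-based behaviour.)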
uint32_t domid,
uint32_t target);
+/* Perform a debug operation on a domain (e.g. toggle single-stepping). */
+int xc_domain_debug_control(int xc_handle,
+ uint32_t domid,
+ uint32_t sop,
+ uint32_t vcpu);
+
#if defined(__i386__) || defined(__x86_64__)
int xc_cpuid_check(int xc,
const unsigned int *input,
}
break;
+ case XEN_DOMCTL_debug_op:
+ {
+ struct domain *d;
+ struct vcpu *v;
+
+ ret = -ESRCH;
+ d = rcu_lock_domain_by_id(domctl->domain);
+ if ( d == NULL )
+ break;
+
+ ret = -EINVAL;
+ if ( (domctl->u.debug_op.vcpu >= MAX_VIRT_CPUS) ||
+ ((v = d->vcpu[domctl->u.debug_op.vcpu]) == NULL) )
+ goto debug_op_out;
+
+ ret = -EINVAL;
+ if ( !is_hvm_domain(d) )
+ goto debug_op_out;
+
+ ret = hvm_debug_op(v, domctl->u.debug_op.op);
+
+ debug_op_out:
+ rcu_unlock_domain(d);
+ }
+ break;
+
default:
ret = -ENOSYS;
break;
return rc;
}
+int hvm_debug_op(struct vcpu *v, int32_t op)
+{
+ int rc;
+
+ switch ( op )
+ {
+ case XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON:
+ case XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF:
+ rc = -ENOSYS;
+ if ( !cpu_has_monitor_trap_flag )
+ break;
+ rc = 0;
+ vcpu_pause(v);
+ v->arch.hvm_vcpu.single_step =
+ (op == XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON);
+ vcpu_unpause(v); /* guest will latch new state */
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+
+ return rc;
+}
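(The vcpu_pause()/vcpu_unpause() pair is what makes the new single_step value
take effect: unpausing forces a fresh VM entry, so vmx_intr_assist() in the
hunk below sees single_step set and latches CPU_BASED_MONITOR_TRAP_FLAG into
the execution controls.)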
+
+
/*
* Local variables:
* mode: C
unsigned int tpr_threshold = 0;
enum hvm_intblk intblk;
+ /* Block event injection while single-stepping with the Monitor Trap Flag. */
+ if ( unlikely(v->arch.hvm_vcpu.single_step) )
+ {
+ v->arch.hvm_vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
+ __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+ return;
+ }
+
/* Crank the handle on interrupt state. */
pt_update_irq(v);
hvm_dirq_assist(v);
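(Returning early here, before the interrupt and timer injection logic, ensures
no event is injected while a step is pending, so the following MTF vmexit
corresponds to exactly one instruction executed by the guest itself.)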
(opt_softtsc ? CPU_BASED_RDTSC_EXITING : 0));
opt = (CPU_BASED_ACTIVATE_MSR_BITMAP |
CPU_BASED_TPR_SHADOW |
+ CPU_BASED_MONITOR_TRAP_FLAG |
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
_vmx_cpu_based_exec_control = adjust_vmx_controls(
min, opt, MSR_IA32_VMX_PROCBASED_CTLS);
v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
}
+ /* Do not enable the Monitor Trap Flag until single-step debugging starts. */
+ v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
+
__vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
if ( cpu_has_vmx_secondary_exec_control )
__vmwrite(SECONDARY_VM_EXEC_CONTROL,
if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
{
unsigned long intercepts = __vmread(EXCEPTION_BITMAP);
- unsigned long mask = (1U << TRAP_debug) | (1U << TRAP_int3);
+ unsigned long mask = 1u << TRAP_int3;
+
+ if ( !cpu_has_monitor_trap_flag )
+ mask |= 1u << TRAP_debug;
+
v->arch.hvm_vcpu.debug_state_latch = debug_state;
if ( debug_state )
intercepts |= mask;
__restore_debug_registers(curr);
write_debugreg(6, read_debugreg(6) | 0x4000);
}
+ if ( cpu_has_monitor_trap_flag )
+ break;
+ /* No MTF: single-stepping uses TF, so fall through and hand the
+ * resulting #DB to the debugger just as for int3. */
case TRAP_int3:
if ( curr->domain->debugger_attached )
{
*/
exit_qualification = __vmread(EXIT_QUALIFICATION);
write_debugreg(6, exit_qualification | 0xffff0ff0);
- if ( !v->domain->debugger_attached )
+ if ( !v->domain->debugger_attached || cpu_has_monitor_trap_flag )
goto exit_and_crash;
domain_pause_for_debugger();
break;
break;
}
+ case EXIT_REASON_MONITOR_TRAP_FLAG:
+ {
+ v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
+ __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+ if ( v->domain->debugger_attached && v->arch.hvm_vcpu.single_step )
+ domain_pause_for_debugger();
+ break;
+ }
+
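(The handler clears CPU_BASED_MONITOR_TRAP_FLAG on every MTF exit; while the
debugger keeps single_step set, vmx_intr_assist() re-arms the flag on the next
VM entry, so the guest advances one instruction per step.)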
default:
exit_and_crash:
gdprintk(XENLOG_ERR, "Bad vmexit (reason %x)\n", exit_reason);
return hvm_funcs.set_info_guest(v);
}
+int hvm_debug_op(struct vcpu *v, int32_t op);
+
#endif /* __ASM_X86_HVM_HVM_H__ */
bool_t flag_dr_dirty;
bool_t debug_state_latch;
+ bool_t single_step;
union {
struct arch_vmx_struct vmx;
#define CPU_BASED_MOV_DR_EXITING 0x00800000
#define CPU_BASED_UNCOND_IO_EXITING 0x01000000
#define CPU_BASED_ACTIVATE_IO_BITMAP 0x02000000
+#define CPU_BASED_MONITOR_TRAP_FLAG 0x08000000
#define CPU_BASED_ACTIVATE_MSR_BITMAP 0x10000000
#define CPU_BASED_MONITOR_EXITING 0x20000000
#define CPU_BASED_PAUSE_EXITING 0x40000000
(vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)
#define cpu_has_vmx_vpid \
(vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
+#define cpu_has_monitor_trap_flag \
+ (vmx_cpu_based_exec_control & CPU_BASED_MONITOR_TRAP_FLAG)
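(Because CPU_BASED_MONITOR_TRAP_FLAG is in the optional set passed to
adjust_vmx_controls() in the vmcs.c hunk above, the bit survives into
vmx_cpu_based_exec_control only on CPUs that advertise MTF, so this macro
doubles as the host capability check used throughout the patch.)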
/* GUEST_INTERRUPTIBILITY_INFO flags. */
#define VMX_INTR_SHADOW_STI 0x00000001
#define EXIT_REASON_INVALID_GUEST_STATE 33
#define EXIT_REASON_MSR_LOADING 34
#define EXIT_REASON_MWAIT_INSTRUCTION 36
+#define EXIT_REASON_MONITOR_TRAP_FLAG 37
#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION 40
#define EXIT_REASON_MACHINE_CHECK 41
*/
#define XEN_DOMCTL_suppress_spurious_page_faults 53
+#define XEN_DOMCTL_debug_op 54
+#define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF 0
+#define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON 1
+struct xen_domctl_debug_op {
+ uint32_t op; /* IN */
+ uint32_t vcpu; /* IN */
+};
+typedef struct xen_domctl_debug_op xen_domctl_debug_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_debug_op_t);
+
+
struct xen_domctl {
uint32_t cmd;
uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
struct xen_domctl_set_opt_feature set_opt_feature;
struct xen_domctl_set_target set_target;
struct xen_domctl_subscribe subscribe;
+ struct xen_domctl_debug_op debug_op;
#if defined(__i386__) || defined(__x86_64__)
struct xen_domctl_cpuid cpuid;
#endif