do_sched_op_compat(SCHEDOP_block, 0);
- HVMTRACE_1D(HLT, curr, /* pending = */ vcpu_runnable(curr));
+ HVMTRACE_1D(HLT, /* pending = */ vcpu_runnable(curr));
}
void hvm_triple_fault(void)
ASSERT(intack.source != hvm_intsrc_none);
- HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
+ HVMTRACE_2D(INJ_VIRQ, 0x0, /*fake=*/ 1);
/*
* Create a dummy virtual interrupt to intercept as soon as the
}
else
{
- HVMTRACE_2D(INJ_VIRQ, v, intack.vector, /*fake=*/ 0);
+ HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
svm_inject_extint(v, intack.vector);
pt_intr_post(v, intack);
}
if ( trapnr == TRAP_page_fault )
{
vmcb->cr2 = curr->arch.hvm_vcpu.guest_cr[2] = cr2;
- HVMTRACE_LONG_2D(PF_INJECT, curr, errcode, TRC_PAR_LONG(cr2));
+ HVMTRACE_LONG_2D(PF_INJECT, errcode, TRC_PAR_LONG(cr2));
}
else
{
- HVMTRACE_2D(INJ_EXC, curr, trapnr, errcode);
+ HVMTRACE_2D(INJ_EXC, trapnr, errcode);
}
if ( (trapnr == TRAP_debug) &&
__clear_bit(X86_FEATURE_APIC & 31, edx);
}
- HVMTRACE_5D (CPUID, v, input, *eax, *ebx, *ecx, *edx);
+ HVMTRACE_5D(CPUID, input, *eax, *ebx, *ecx, *edx);
}
static void svm_vmexit_do_cpuid(struct cpu_user_regs *regs)
static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs)
{
- HVMTRACE_0D(DR_WRITE, v);
+ HVMTRACE_0D(DR_WRITE);
__restore_debug_registers(v);
}
regs->edx = msr_content >> 32;
done:
- HVMTRACE_3D (MSR_READ, v, ecx, regs->eax, regs->edx);
+ HVMTRACE_3D(MSR_READ, ecx, regs->eax, regs->edx);
HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
return X86EMUL_OKAY;
msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
- HVMTRACE_3D (MSR_WRITE, v, ecx, regs->eax, regs->edx);
+ HVMTRACE_3D(MSR_WRITE, ecx, regs->eax, regs->edx);
switch ( ecx )
{
static void svm_invlpg_intercept(unsigned long vaddr)
{
struct vcpu *curr = current;
- HVMTRACE_LONG_2D(INVLPG, curr, 0, TRC_PAR_LONG(vaddr));
+ HVMTRACE_LONG_2D(INVLPG, 0, TRC_PAR_LONG(vaddr));
paging_invlpg(curr, vaddr);
svm_asid_g_invlpg(curr, vaddr);
}
exit_reason = vmcb->exitcode;
- HVMTRACE_ND(VMEXIT64, 1/*cycles*/, v, 3, exit_reason,
+ HVMTRACE_ND(VMEXIT64, 1/*cycles*/, 3, exit_reason,
(uint32_t)regs->eip, (uint32_t)((uint64_t)regs->eip >> 32),
0, 0, 0);
{
case VMEXIT_INTR:
/* Asynchronous event, handled when we STGI'd after the VMEXIT. */
- HVMTRACE_0D(INTR, v);
+ HVMTRACE_0D(INTR);
break;
case VMEXIT_NMI:
/* Asynchronous event, handled when we STGI'd after the VMEXIT. */
- HVMTRACE_0D(NMI, v);
+ HVMTRACE_0D(NMI);
break;
case VMEXIT_SMI:
/* Asynchronous event, handled when we STGI'd after the VMEXIT. */
- HVMTRACE_0D(SMI, v);
+ HVMTRACE_0D(SMI);
break;
case VMEXIT_EXCEPTION_DB:
if ( paging_fault(va, regs) )
{
if (hvm_long_mode_enabled(v))
- HVMTRACE_LONG_2D(PF_XEN, v, regs->error_code, TRC_PAR_LONG(va));
+ HVMTRACE_LONG_2D(PF_XEN, regs->error_code, TRC_PAR_LONG(va));
else
- HVMTRACE_2D(PF_XEN, v, regs->error_code, va);
+ HVMTRACE_2D(PF_XEN, regs->error_code, va);
break;
}
/* Asynchronous event, handled when we STGI'd after the VMEXIT. */
case VMEXIT_EXCEPTION_MC:
- HVMTRACE_0D(MCE, v);
+ HVMTRACE_0D(MCE);
break;
case VMEXIT_VINTR:
case VMEXIT_VMMCALL:
if ( (inst_len = __get_instruction_length(v, INSTR_VMCALL)) == 0 )
break;
- HVMTRACE_1D(VMMCALL, v, regs->eax);
+ HVMTRACE_1D(VMMCALL, regs->eax);
rc = hvm_do_hypercall(regs);
if ( rc != HVM_HCALL_preempted )
{
asmlinkage void svm_trace_vmentry(void)
{
- HVMTRACE_ND (VMENTRY, 1/*cycles*/, current, 0, 0, 0, 0, 0, 0, 0);
+ HVMTRACE_ND(VMENTRY, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
}
/*
}
else
{
- HVMTRACE_2D(INJ_VIRQ, v, intack.vector, /*fake=*/ 0);
+ HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
vmx_inject_extint(v, intack.vector);
pt_intr_post(v, intack);
}
__vmwrite(VM_ENTRY_INTR_INFO, intr_fields);
if ( trap == TRAP_page_fault )
- HVMTRACE_LONG_2D(PF_INJECT, v, error_code,
+ HVMTRACE_LONG_2D(PF_INJECT, error_code,
TRC_PAR_LONG(v->arch.hvm_vcpu.guest_cr[2]));
else
- HVMTRACE_2D(INJ_EXC, v, trap, error_code);
+ HVMTRACE_2D(INJ_EXC, trap, error_code);
}
void vmx_inject_hw_exception(struct vcpu *v, int trap, int error_code)
break;
}
- HVMTRACE_5D (CPUID, current, input, *eax, *ebx, *ecx, *edx);
+ HVMTRACE_5D(CPUID, input, *eax, *ebx, *ecx, *edx);
}
static void vmx_do_cpuid(struct cpu_user_regs *regs)
{
struct vcpu *v = current;
- HVMTRACE_0D(DR_WRITE, v);
+ HVMTRACE_0D(DR_WRITE);
if ( !v->arch.hvm_vcpu.flag_dr_dirty )
__restore_debug_registers(v);
static void vmx_invlpg_intercept(unsigned long vaddr)
{
struct vcpu *curr = current;
- HVMTRACE_LONG_2D(INVLPG, curr, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
+ HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
if ( paging_invlpg(curr, vaddr) )
vpid_sync_vcpu_gva(curr, vaddr);
}
goto exit_and_crash;
}
- HVMTRACE_LONG_2D(CR_WRITE, v, cr, TRC_PAR_LONG(value));
+ HVMTRACE_LONG_2D(CR_WRITE, cr, TRC_PAR_LONG(value));
HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
break;
}
- HVMTRACE_LONG_2D(CR_READ, v, cr, TRC_PAR_LONG(value));
+ HVMTRACE_LONG_2D(CR_READ, cr, TRC_PAR_LONG(value));
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
}
case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
vmx_update_guest_cr(v, 0);
- HVMTRACE_0D(CLTS, current);
+ HVMTRACE_0D(CLTS);
break;
case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
value = v->arch.hvm_vcpu.guest_cr[0];
/* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
value = (value & ~0xe) | ((exit_qualification >> 16) & 0xf);
- HVMTRACE_LONG_1D(LMSW, current, value);
+ HVMTRACE_LONG_1D(LMSW, value);
return !hvm_set_cr0(value);
default:
BUG();
regs->edx = (uint32_t)(msr_content >> 32);
done:
- HVMTRACE_3D (MSR_READ, v, ecx, regs->eax, regs->edx);
+ HVMTRACE_3D(MSR_READ, ecx, regs->eax, regs->edx);
HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
ecx, (unsigned long)regs->eax,
(unsigned long)regs->edx);
msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
- HVMTRACE_3D (MSR_WRITE, v, ecx, regs->eax, regs->edx);
+ HVMTRACE_3D(MSR_WRITE, ecx, regs->eax, regs->edx);
switch ( ecx )
{
BUG_ON(!(vector & INTR_INFO_VALID_MASK));
vector &= INTR_INFO_VECTOR_MASK;
- HVMTRACE_1D(INTR, current, vector);
+ HVMTRACE_1D(INTR, vector);
switch ( vector )
{
break;
case EXIT_REASON_MACHINE_CHECK:
printk("caused by machine check.\n");
- HVMTRACE_0D(MCE, curr);
+ HVMTRACE_0D(MCE);
do_machine_check(regs);
break;
default:
exit_reason = __vmread(VM_EXIT_REASON);
- HVMTRACE_ND(VMEXIT64, 1/*cycles*/, v, 3, exit_reason,
+ HVMTRACE_ND(VMEXIT64, 1/*cycles*/, 3, exit_reason,
(uint32_t)regs->eip, (uint32_t)((uint64_t)regs->eip >> 32),
0, 0, 0);
if ( paging_fault(exit_qualification, regs) )
{
if ( hvm_long_mode_enabled(v) )
- HVMTRACE_LONG_2D (PF_XEN, v, regs->error_code,
+ HVMTRACE_LONG_2D(PF_XEN, regs->error_code,
TRC_PAR_LONG(exit_qualification) );
else
- HVMTRACE_2D (PF_XEN, v,
+ HVMTRACE_2D(PF_XEN,
regs->error_code, exit_qualification );
break;
}
if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) !=
(X86_EVENTTYPE_NMI << 8) )
goto exit_and_crash;
- HVMTRACE_0D(NMI, v);
+ HVMTRACE_0D(NMI);
do_nmi(regs); /* Real NMI, vector 2: normal processing. */
break;
case TRAP_machine_check:
- HVMTRACE_0D(MCE, v);
+ HVMTRACE_0D(MCE);
do_machine_check(regs);
break;
default:
case EXIT_REASON_VMCALL:
{
int rc;
- HVMTRACE_1D(VMMCALL, v, regs->eax);
+ HVMTRACE_1D(VMMCALL, regs->eax);
inst_len = __get_instruction_length(); /* Safe: VMCALL */
rc = hvm_do_hypercall(regs);
if ( rc != HVM_HCALL_preempted )
asmlinkage void vmx_trace_vmentry(void)
{
- HVMTRACE_ND (VMENTRY, 1/*cycles*/, current, 0, 0, 0, 0, 0, 0, 0);
+ HVMTRACE_ND(VMENTRY, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
}
/*
#define TRC_PAR_LONG(par) (par)
#endif
-#define HVMTRACE_ND(evt, cycles, vcpu, count, d1, d2, d3, d4, d5, d6) \
+#define HVMTRACE_ND(evt, cycles, count, d1, d2, d3, d4, d5, d6) \
do { \
if ( unlikely(tb_init_done) && DO_TRC_HVM_ ## evt ) \
{ \
struct { \
- u32 did:16, vid:16; \
u32 d[6]; \
} _d; \
- _d.did=(vcpu)->domain->domain_id; \
- _d.vid=(vcpu)->vcpu_id; \
_d.d[0]=(d1); \
_d.d[1]=(d2); \
_d.d[2]=(d3); \
} \
} while(0)
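For illustration, with the trimmed record layout a call such as HVMTRACE_2D(INJ_EXC, trapnr, errcode) from the hunks above now expands roughly as sketched below. This is an approximation: the remaining _d.d[] assignments and the final trace-emission call are elided from this hunk, so they appear here only as a comment.

    /* Approximate expansion of HVMTRACE_2D(INJ_EXC, trapnr, errcode).
     * Note the record no longer carries a did/vid header, only data words. */
    do {
        if ( unlikely(tb_init_done) && DO_TRC_HVM_INJ_EXC )
        {
            struct {
                u32 d[6];
            } _d;
            _d.d[0] = (trapnr);
            _d.d[1] = (errcode);
            _d.d[2] = (0);
            /* ... d[3]..d[5] are likewise zeroed, then the record is
             * handed to the trace buffer as a 2-word event (the emission
             * call itself is elided from this hunk). */
        }
    } while(0);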
-#define HVMTRACE_6D(evt, vcpu, d1, d2, d3, d4, d5, d6) \
- HVMTRACE_ND(evt, 0, vcpu, 6, d1, d2, d3, d4, d5, d6)
-#define HVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5) \
- HVMTRACE_ND(evt, 0, vcpu, 5, d1, d2, d3, d4, d5, 0)
-#define HVMTRACE_4D(evt, vcpu, d1, d2, d3, d4) \
- HVMTRACE_ND(evt, 0, vcpu, 4, d1, d2, d3, d4, 0, 0)
-#define HVMTRACE_3D(evt, vcpu, d1, d2, d3) \
- HVMTRACE_ND(evt, 0, vcpu, 3, d1, d2, d3, 0, 0, 0)
-#define HVMTRACE_2D(evt, vcpu, d1, d2) \
- HVMTRACE_ND(evt, 0, vcpu, 2, d1, d2, 0, 0, 0, 0)
-#define HVMTRACE_1D(evt, vcpu, d1) \
- HVMTRACE_ND(evt, 0, vcpu, 1, d1, 0, 0, 0, 0, 0)
-#define HVMTRACE_0D(evt, vcpu) \
- HVMTRACE_ND(evt, 0, vcpu, 0, 0, 0, 0, 0, 0, 0)
+#define HVMTRACE_6D(evt, d1, d2, d3, d4, d5, d6) \
+ HVMTRACE_ND(evt, 0, 6, d1, d2, d3, d4, d5, d6)
+#define HVMTRACE_5D(evt, d1, d2, d3, d4, d5) \
+ HVMTRACE_ND(evt, 0, 5, d1, d2, d3, d4, d5, 0)
+#define HVMTRACE_4D(evt, d1, d2, d3, d4) \
+ HVMTRACE_ND(evt, 0, 4, d1, d2, d3, d4, 0, 0)
+#define HVMTRACE_3D(evt, d1, d2, d3) \
+ HVMTRACE_ND(evt, 0, 3, d1, d2, d3, 0, 0, 0)
+#define HVMTRACE_2D(evt, d1, d2) \
+ HVMTRACE_ND(evt, 0, 2, d1, d2, 0, 0, 0, 0)
+#define HVMTRACE_1D(evt, d1) \
+ HVMTRACE_ND(evt, 0, 1, d1, 0, 0, 0, 0, 0)
+#define HVMTRACE_0D(evt) \
+ HVMTRACE_ND(evt, 0, 0, 0, 0, 0, 0, 0, 0)
#ifdef __x86_64__
-#define HVMTRACE_LONG_1D(evt, vcpu, d1) \
- HVMTRACE_2D(evt ## 64, vcpu, (d1) & 0xFFFFFFFF, (d1) >> 32)
-#define HVMTRACE_LONG_2D(evt,vcpu,d1,d2, ...) \
- HVMTRACE_3D(evt ## 64, vcpu, d1, d2)
-#define HVMTRACE_LONG_3D(evt, vcpu, d1, d2, d3, ...) \
- HVMTRACE_4D(evt ## 64, vcpu, d1, d2, d3)
-#define HVMTRACE_LONG_4D(evt, vcpu, d1, d2, d3, d4, ...) \
- HVMTRACE_5D(evt ## 64, vcpu, d1, d2, d3, d4)
+#define HVMTRACE_LONG_1D(evt, d1) \
+ HVMTRACE_2D(evt ## 64, (d1) & 0xFFFFFFFF, (d1) >> 32)
+#define HVMTRACE_LONG_2D(evt, d1, d2, ...) \
+ HVMTRACE_3D(evt ## 64, d1, d2)
+#define HVMTRACE_LONG_3D(evt, d1, d2, d3, ...) \
+ HVMTRACE_4D(evt ## 64, d1, d2, d3)
+#define HVMTRACE_LONG_4D(evt, d1, d2, d3, d4, ...) \
+ HVMTRACE_5D(evt ## 64, d1, d2, d3, d4)
#else
#define HVMTRACE_LONG_1D HVMTRACE_1D
#define HVMTRACE_LONG_2D HVMTRACE_2D
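As a worked example of the LONG variants with the new signatures (a sketch, assuming the elided 64-bit TRC_PAR_LONG splits its argument into low and high 32-bit words, in contrast to the 32-bit fallback shown above):

    /* SVM page-fault injection site after this patch: */
    HVMTRACE_LONG_2D(PF_INJECT, errcode, TRC_PAR_LONG(cr2));

    /* Assumed expansion on __x86_64__, step by step:
     *   HVMTRACE_3D(PF_INJECT64, errcode, (cr2) & 0xFFFFFFFF, (cr2) >> 32)
     *   HVMTRACE_ND(PF_INJECT64, 0, 3, errcode, <cr2 low>, <cr2 high>, 0, 0, 0)
     * i.e. a three-word PF_INJECT64 record.  On 32-bit builds the
     * HVMTRACE_LONG_* names alias the plain macros and TRC_PAR_LONG is a
     * no-op, so the same call produces a two-word PF_INJECT record. */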