0x00000000 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) unknown (0x%(event)016x) [ 0x%(1)08x 0x%(2)08x 0x%(3)08x 0x%(4)08x 0x%(5)08x ]
-0x0001f001 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) lost_records 0x%(1)08x
-0x0001f002 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) wrap_buffer 0x%(1)08x
-0x0001f003 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) cpu_change 0x%(1)08x
+0x0001f001 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) lost_records 0x%(1)08x
+0x0001f002 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) wrap_buffer 0x%(1)08x
+0x0001f003 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) cpu_change 0x%(1)08x
-0x0002f001 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) sched_add_domain [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x0002f002 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) sched_rem_domain [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x0002f003 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) domain_sleep [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x0002f004 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) domain_wake [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x0002f005 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) do_yield [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x0002f006 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) do_block [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x0002f007 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) domain_shutdown [ domid = 0x%(1)08x, edomid = 0x%(2)08x, reason = 0x%(3)08x ]
+0x0002f001 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) sched_add_domain [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x0002f002 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) sched_rem_domain [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x0002f003 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) domain_sleep [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x0002f004 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) domain_wake [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x0002f005 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) do_yield [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x0002f006 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) do_block [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x0002f007 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) domain_shutdown [ domid = 0x%(1)08x, edomid = 0x%(2)08x, reason = 0x%(3)08x ]
0x0002f008 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) sched_ctl
-0x0002f009 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) sched_adjdom [ domid = 0x%(1)08x ]
-0x0002f00a CPU%(cpu)d %(tsc)d (+%(reltsc)8d) __enter_scheduler [ prev<domid:edomid> = 0x%(1)08x : 0x%(2)08x, next<domid:edomid> = 0x%(3)08x : 0x%(4)08x ]
+0x0002f009 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) sched_adjdom [ domid = 0x%(1)08x ]
+0x0002f00a CPU%(cpu)d %(tsc)d (+%(reltsc)8d) __enter_scheduler [ prev<domid:edomid> = 0x%(1)08x : 0x%(2)08x, next<domid:edomid> = 0x%(3)08x : 0x%(4)08x ]
0x0002f00B CPU%(cpu)d %(tsc)d (+%(reltsc)8d) s_timer_fn
0x0002f00c CPU%(cpu)d %(tsc)d (+%(reltsc)8d) t_timer_fn
0x0002f00d CPU%(cpu)d %(tsc)d (+%(reltsc)8d) dom_timer_fn
+0x0002f00e CPU%(cpu)d %(tsc)d (+%(reltsc)8d) switch_infprev [ old_domid = 0x%(1)08x, runtime = %(2)d ]
+0x0002f00f CPU%(cpu)d %(tsc)d (+%(reltsc)8d) switch_infnext [ new_domid = 0x%(1)08x, time = %(2)d, r_time = %(3)d ]
0x00081001 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) VMENTRY [ dom:vcpu = 0x%(1)08x ]
0x00081002 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) VMEXIT [ dom:vcpu = 0x%(1)08x, exitcode = 0x%(2)08x, rIP = 0x%(3)08x ]
-0x00082001 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) PF_XEN [ dom:vcpu = 0x%(1)08x, errorcode = 0x%(2)02x, virt = 0x%(3)08x ]
-0x00082002 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) PF_INJECT [ dom:vcpu = 0x%(1)08x, virt = 0x%(2)08x, errorcode = 0x%(3)02x ]
+0x00081102 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) VMEXIT [ dom:vcpu = 0x%(1)08x, exitcode = 0x%(2)08x, rIP = 0x%(3)016x ]
+0x00082001 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) PF_XEN [ dom:vcpu = 0x%(1)08x, errorcode = 0x%(2)02x, virt = 0x%(3)08x ]
+0x00082101 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) PF_XEN [ dom:vcpu = 0x%(1)08x, errorcode = 0x%(2)02x, virt = 0x%(3)016x ]
+0x00082002 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) PF_INJECT [ dom:vcpu = 0x%(1)08x, errorcode = 0x%(2)02x, virt = 0x%(3)08x ]
+0x00082102 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) PF_INJECT [ dom:vcpu = 0x%(1)08x, errorcode = 0x%(2)02x, virt = 0x%(3)016x ]
0x00082003 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) INJ_EXC [ dom:vcpu = 0x%(1)08x, vector = 0x%(2)02x, errorcode = 0x%(3)04x ]
0x00082004 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) INJ_VIRQ [ dom:vcpu = 0x%(1)08x, vector = 0x%(2)02x, fake = %(3)d ]
0x00082005 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) REINJ_VIRQ [ dom:vcpu = 0x%(1)08x, vector = 0x%(2)02x ]
0x00082006 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) IO_READ [ dom:vcpu = 0x%(1)08x, port = 0x%(2)04x, size = %(3)d ]
0x00082007 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) IO_WRITE [ dom:vcpu = 0x%(1)08x, port = 0x%(2)04x, size = %(3)d ]
0x00082008 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) CR_READ [ dom:vcpu = 0x%(1)08x, CR# = %(2)d, value = 0x%(3)08x ]
+0x00082108 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) CR_READ [ dom:vcpu = 0x%(1)08x, CR# = %(2)d, value = 0x%(3)016x ]
0x00082009 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) CR_WRITE [ dom:vcpu = 0x%(1)08x, CR# = %(2)d, value = 0x%(3)08x ]
+0x00082109 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) CR_WRITE [ dom:vcpu = 0x%(1)08x, CR# = %(2)d, value = 0x%(3)016x ]
0x0008200A CPU%(cpu)d %(tsc)d (+%(reltsc)8d) DR_READ [ dom:vcpu = 0x%(1)08x ]
0x0008200B CPU%(cpu)d %(tsc)d (+%(reltsc)8d) DR_WRITE [ dom:vcpu = 0x%(1)08x ]
-0x0008200C CPU%(cpu)d %(tsc)d (+%(reltsc)8d) MSR_READ [ dom:vcpu = 0x%(1)08x, MSR# = 0x%(2)08x, value = 0x%(3)08x ]
-0x0008200D CPU%(cpu)d %(tsc)d (+%(reltsc)8d) MSR_WRITE [ dom:vcpu = 0x%(1)08x, MSR# = 0x%(2)08x, value = 0x%(3)08x ]
-0x0008200E CPU%(cpu)d %(tsc)d (+%(reltsc)8d) CPUID [ dom:vcpu = 0x%(1)08x, func = 0x%(2)08x, eax:ebx = 0x%(3)016x, ecx:edx = 0x%(4)016x ]
+0x0008200C CPU%(cpu)d %(tsc)d (+%(reltsc)8d) MSR_READ [ dom:vcpu = 0x%(1)08x, MSR# = 0x%(2)08x, value = 0x%(3)016x ]
+0x0008200D CPU%(cpu)d %(tsc)d (+%(reltsc)8d) MSR_WRITE [ dom:vcpu = 0x%(1)08x, MSR# = 0x%(2)08x, value = 0x%(3)016x ]
+0x0008200E CPU%(cpu)d %(tsc)d (+%(reltsc)8d) CPUID [ dom:vcpu = 0x%(1)08x, func = 0x%(2)08x, eax = 0x%(3)08x, ebx = 0x%(4)08x, ecx=0x%(5)08x, edx = 0x%(6)08x ]
0x0008200F CPU%(cpu)d %(tsc)d (+%(reltsc)8d) INTR [ dom:vcpu = 0x%(1)08x, vector = 0x%(2)02x ]
0x00082010 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) NMI [ dom:vcpu = 0x%(1)08x ]
0x00082011 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) SMI [ dom:vcpu = 0x%(1)08x ]
0x00082012 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) VMMCALL [ dom:vcpu = 0x%(1)08x, func = 0x%(2)08x ]
0x00082013 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) HLT [ dom:vcpu = 0x%(1)08x, intpending = %(2)d ]
-0x00082014 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) INVLPG [ dom:vcpu = 0x%(1)08x, virt = 0x%(2)08x, invlpga = %(3)d, asid = 0x%(4)02x ]
+0x00082014 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) INVLPG [ dom:vcpu = 0x%(1)08x, is invlpga? = %(2)d, virt = 0x%(3)08x ]
+0x00082114 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) INVLPG [ dom:vcpu = 0x%(1)08x, is invlpga? = %(2)d, virt = 0x%(3)016x ]
0x00082015 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) MCE [ dom:vcpu = 0x%(1)08x ]
0x00082016 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) IO_ASSIST [ dom:vcpu = 0x%(1)08x, data = 0x%(2)04x ]
0x00082017 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) MMIO_ASSIST [ dom:vcpu = 0x%(1)08x, data = 0x%(2)04x ]
0x00082018 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) CLTS [ dom:vcpu = 0x%(1)08x ]
0x00082019 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) LMSW [ dom:vcpu = 0x%(1)08x, value = 0x%(2)08x ]
-0x00082020 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) PF_XEN64 [ dom:vcpu = 0x%(1)08x, errorcode = 0x%(2)02x, virt = 0x%(3)08x ]
+0x00082119 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) LMSW [ dom:vcpu = 0x%(1)08x, value = 0x%(2)016x ]
+
+0x0010f001 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) page_grant_map [ domid = %(1)d ]
+0x0010f002 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) page_grant_unmap [ domid = %(1)d ]
+0x0010f003 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) page_grant_transfer [ domid = %(1)d ]
+
+0x0020f001 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) hypercall [ eip = 0x%(1)08x, eax = 0x%(2)08x ]
+0x0020f101 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) hypercall [ rip = 0x%(1)016x, eax = 0x%(2)08x ]
+0x0020f003 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) trap [ eip = 0x%(1)08x, trapnr:error = 0x%(2)08x ]
+0x0020f103 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) trap [ rip = 0x%(1)016x, trapnr:error = 0x%(2)08x ]
+0x0020f004 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) page_fault [ eip = 0x%(1)08x, addr = 0x%(2)08x, error = 0x%(3)08x ]
+0x0020f104 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) page_fault [ rip = 0x%(1)016x, addr = 0x%(3)016x, error = 0x%(5)08x ]
{event_id}{whitespace}{text format string}
The textual format string may include format specifiers, such as:
- %(cpu)d, %(tsc)d, %(event)d, %(1)d, %(2)d, %(3)d, %(4)d, %(5)d
+ %(cpu)d, %(tsc)d, %(event)d, %(1)d, %(2)d, %(3)d, %(4)d, ...
[ the 'd' format specifier outputs in decimal, alternatively 'x'
will output in hexadecimal and 'o' will output in octal ]
Which correspond to the CPU number, timestamp counter, event ID and
- the 5 data fields from the trace record. There should be one such
+ the 7 data fields from the trace record. There should be one such
rule for each type of event.
Depending on your system and the volume of trace buffer data,
defs = read_defs(arg[0])
# structure of trace record (as output by xentrace):
-# HDR(I) {TSC(Q)} D1(I) D2(I) D3(I) D4(I) D5(I)
+# HDR(I) {TSC(Q)} D1(I) D2(I) D3(I) D4(I) D5(I) D6(I) D7(I)
#
# HDR consists of EVENT:28:, n_data:3:, tsc_in:1:
# EVENT means Event ID
D3REC = "III"
D4REC = "IIII"
D5REC = "IIIII"
+D6REC = "IIIIII"
+D7REC = "IIIIIII"
last_tsc = [0]
d3 = 0
d4 = 0
d5 = 0
+ d6 = 0
+ d7 = 0
tsc = 0
if not line:
break
(d1, d2, d3, d4, d5) = struct.unpack(D5REC, line)
+ if n_data == 6:
+ line = sys.stdin.read(struct.calcsize(D6REC))
+ if not line:
+ break
+ (d1, d2, d3, d4, d5, d6) = struct.unpack(D6REC, line)
+ if n_data == 7:
+ line = sys.stdin.read(struct.calcsize(D7REC))
+ if not line:
+ break
+ (d1, d2, d3, d4, d5, d6, d7) = struct.unpack(D7REC, line)
# Event field is 28bit of 'uint32_t' in header, not 'long'.
event &= 0x0fffffff
'2' : d2,
'3' : d3,
'4' : d4,
- '5' : d5 }
+ '5' : d5,
+ '6' : d6,
+ '7' : d7 }
try:
if ( trapnr == TRAP_page_fault )
{
vmcb->cr2 = curr->arch.hvm_vcpu.guest_cr[2] = cr2;
- HVMTRACE_2D(PF_INJECT, curr, curr->arch.hvm_vcpu.guest_cr[2], errcode);
+ HVMTRACE_LONG_2D(PF_INJECT, curr, errcode, TRC_PAR_LONG(cr2));
}
else
{
__clear_bit(X86_FEATURE_APIC & 31, edx);
}
- HVMTRACE_3D(CPUID, v, input,
- ((uint64_t)*eax << 32) | *ebx, ((uint64_t)*ecx << 32) | *edx);
+ HVMTRACE_5D (CPUID, v, input, *eax, *ebx, *ecx, *edx);
}
static void svm_vmexit_do_cpuid(struct cpu_user_regs *regs)
regs->edx = msr_content >> 32;
done:
- hvmtrace_msr_read(v, ecx, msr_content);
+ HVMTRACE_3D (MSR_READ, v, ecx, regs->eax, regs->edx);
HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
return X86EMUL_OKAY;
msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
- hvmtrace_msr_write(v, ecx, msr_content);
+ HVMTRACE_3D (MSR_WRITE, v, ecx, regs->eax, regs->edx);
switch ( ecx )
{
static void svm_invlpg_intercept(unsigned long vaddr)
{
struct vcpu *curr = current;
- HVMTRACE_2D(INVLPG, curr, 0, vaddr);
+ HVMTRACE_LONG_2D(INVLPG, curr, 0, TRC_PAR_LONG(vaddr));
paging_invlpg(curr, vaddr);
svm_asid_g_invlpg(curr, vaddr);
}
exit_reason = vmcb->exitcode;
- hvmtrace_vmexit(v, regs->eip, exit_reason);
+ if ( hvm_long_mode_enabled(v) )
+ HVMTRACE_ND (VMEXIT64, 1/*cycles*/, v, 3, exit_reason,
+ regs->eip & 0xFFFFFFFF, regs->eip >> 32, 0, 0, 0);
+ else
+ HVMTRACE_ND (VMEXIT, 1/*cycles*/, v, 2, exit_reason,
+ regs->eip, 0, 0, 0, 0);
if ( unlikely(exit_reason == VMEXIT_INVALID) )
{
if ( paging_fault(va, regs) )
{
- HVMTRACE_2D(PF_XEN, v, va, regs->error_code);
+ if (hvm_long_mode_enabled(v))
+ HVMTRACE_LONG_2D(PF_XEN, v, regs->error_code, TRC_PAR_LONG(va));
+ else
+ HVMTRACE_2D(PF_XEN, v, regs->error_code, va);
break;
}
asmlinkage void svm_trace_vmentry(void)
{
- hvmtrace_vmentry(current);
+ HVMTRACE_ND (VMENTRY, 1/*cycles*/, current, 0, 0, 0, 0, 0, 0, 0);
}
/*
__vmwrite(VM_ENTRY_INTR_INFO, intr_fields);
if ( trap == TRAP_page_fault )
- HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
+ HVMTRACE_LONG_2D(PF_INJECT, v, error_code,
+ TRC_PAR_LONG(v->arch.hvm_vcpu.guest_cr[2]));
else
HVMTRACE_2D(INJ_EXC, v, trap, error_code);
}
break;
}
- HVMTRACE_3D(CPUID, current, input,
- ((uint64_t)*eax << 32) | *ebx, ((uint64_t)*ecx << 32) | *edx);
+ HVMTRACE_5D (CPUID, current, input, *eax, *ebx, *ecx, *edx);
}
static void vmx_do_cpuid(struct cpu_user_regs *regs)
static void vmx_invlpg_intercept(unsigned long vaddr)
{
struct vcpu *curr = current;
- HVMTRACE_2D(INVLPG, curr, /*invlpga=*/ 0, vaddr);
+ HVMTRACE_LONG_2D(INVLPG, curr, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
if ( paging_invlpg(curr, vaddr) )
vpid_sync_vcpu_gva(curr, vaddr);
}
goto exit_and_crash;
}
- HVMTRACE_2D(CR_WRITE, v, cr, value);
+ HVMTRACE_LONG_2D(CR_WRITE, v, cr, TRC_PAR_LONG(value));
HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
break;
}
- HVMTRACE_2D(CR_READ, v, cr, value);
+ HVMTRACE_LONG_2D(CR_READ, v, cr, TRC_PAR_LONG(value));
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
}
case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
value = v->arch.hvm_vcpu.guest_cr[0];
value = (value & ~0xFFFF) | ((exit_qualification >> 16) & 0xFFFF);
- HVMTRACE_1D(LMSW, current, value);
+ HVMTRACE_LONG_1D(LMSW, current, value);
return !hvm_set_cr0(value);
default:
BUG();
regs->edx = (uint32_t)(msr_content >> 32);
done:
- hvmtrace_msr_read(v, ecx, msr_content);
+ HVMTRACE_3D (MSR_READ, v, ecx, regs->eax, regs->edx);
HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
ecx, (unsigned long)regs->eax,
(unsigned long)regs->edx);
msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
- hvmtrace_msr_write(v, ecx, msr_content);
+ HVMTRACE_3D (MSR_WRITE, v, ecx, regs->eax, regs->edx);
switch ( ecx )
{
exit_reason = __vmread(VM_EXIT_REASON);
- hvmtrace_vmexit(v, regs->eip, exit_reason);
+ if ( hvm_long_mode_enabled(v) )
+ HVMTRACE_ND (VMEXIT64, 1/*cycles*/, v, 3, exit_reason,
+ regs->eip & 0xFFFFFFFF, regs->eip >> 32, 0, 0, 0);
+ else
+ HVMTRACE_ND (VMEXIT, 1/*cycles*/, v, 2, exit_reason,
+ regs->eip, 0, 0, 0, 0);
perfc_incra(vmexits, exit_reason);
if ( paging_fault(exit_qualification, regs) )
{
- hvmtrace_pf_xen(v, exit_qualification, regs->error_code);
+ if ( hvm_long_mode_enabled(v) )
+ HVMTRACE_LONG_2D (PF_XEN, v, regs->error_code,
+ TRC_PAR_LONG(exit_qualification) );
+ else
+ HVMTRACE_2D (PF_XEN, v,
+ regs->error_code, exit_qualification );
break;
}
asmlinkage void vmx_trace_vmentry(void)
{
- hvmtrace_vmentry(current);
+ HVMTRACE_ND (VMENTRY, 1/*cycles*/, current, 0, 0, 0, 0, 0, 0, 0);
}
/*
#include <xen/trace.h>
#ifndef __x86_64__
-#undef TRC_PV_64_FLAG
-#define TRC_PV_64_FLAG 0
+#undef TRC_64_FLAG
+#define TRC_64_FLAG 0
#endif
asmlinkage void trace_hypercall(void)
u32 event;
event = TRC_PV_HYPERCALL;
- event |= TRC_PV_64_FLAG;
+ event |= TRC_64_FLAG;
d.eip = regs->eip;
d.eax = regs->eax;
d.use_error_code=!!use_error_code;
event = TRC_PV_TRAP;
- event |= TRC_PV_64_FLAG;
+ event |= TRC_64_FLAG;
__trace_var(event, 1, sizeof(d), (unsigned char *)&d);
}
}
d.addr = addr;
d.error_code = error_code;
event = TRC_PV_PAGE_FAULT;
- event |= TRC_PV_64_FLAG;
+ event |= TRC_64_FLAG;
__trace_var(event, 1, sizeof(d), (unsigned char *)&d);
}
}
else
#endif
{
- event |= TRC_PV_64_FLAG;
+ event |= TRC_64_FLAG;
__trace_var(event, 1, sizeof(va), (unsigned char *)&va);
}
}
} __attribute__((packed)) d;
d.va1=va1;
d.va2=va2;
- event |= TRC_PV_64_FLAG;
+ event |= TRC_64_FLAG;
__trace_var(event, 1, sizeof(d), (unsigned char *)&d);
}
}
event = ((CONFIG_PAGING_LEVELS == 3) ?
TRC_PV_PTWR_EMULATION_PAE : TRC_PV_PTWR_EMULATION);
- event |= TRC_PV_64_FLAG;
+ event |= TRC_64_FLAG;
__trace_var(event, 1/*tsc*/, sizeof(d), (unsigned char *)&d);
}
}
#include <xen/trace.h>
-#define DO_TRC_HVM_VMENTRY 1
-#define DO_TRC_HVM_VMEXIT 1
-#define DO_TRC_HVM_PF_XEN 1
-#define DO_TRC_HVM_PF_INJECT 1
-#define DO_TRC_HVM_INJ_EXC 1
-#define DO_TRC_HVM_INJ_VIRQ 1
-#define DO_TRC_HVM_REINJ_VIRQ 1
-#define DO_TRC_HVM_IO_READ 1
-#define DO_TRC_HVM_IO_WRITE 1
-#define DO_TRC_HVM_CR_READ 1
-#define DO_TRC_HVM_CR_WRITE 1
-#define DO_TRC_HVM_DR_READ 1
-#define DO_TRC_HVM_DR_WRITE 1
-#define DO_TRC_HVM_MSR_READ 1
-#define DO_TRC_HVM_MSR_WRITE 1
-#define DO_TRC_HVM_CPUID 1
-#define DO_TRC_HVM_INTR 1
-#define DO_TRC_HVM_NMI 1
-#define DO_TRC_HVM_MCE 1
-#define DO_TRC_HVM_SMI 1
-#define DO_TRC_HVM_VMMCALL 1
-#define DO_TRC_HVM_HLT 1
-#define DO_TRC_HVM_INVLPG 1
-#define DO_TRC_HVM_IO_ASSIST 1
-#define DO_TRC_HVM_MMIO_ASSIST 1
-#define DO_TRC_HVM_CLTS 1
-#define DO_TRC_HVM_LMSW 1
+#define DEFAULT_HVM_TRACE_ON 1
+#define DEFAULT_HVM_TRACE_OFF 0
+
+#define DEFAULT_HVM_VMSWITCH DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_PF DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_INJECT DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_IO DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_REGACCESS DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_MISC DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_INTR DEFAULT_HVM_TRACE_ON
+
+#define DO_TRC_HVM_VMENTRY DEFAULT_HVM_VMSWITCH
+#define DO_TRC_HVM_VMEXIT DEFAULT_HVM_VMSWITCH
+#define DO_TRC_HVM_VMEXIT64 DEFAULT_HVM_VMSWITCH
+#define DO_TRC_HVM_PF_XEN DEFAULT_HVM_PF
+#define DO_TRC_HVM_PF_XEN64 DEFAULT_HVM_PF
+#define DO_TRC_HVM_PF_INJECT DEFAULT_HVM_PF
+#define DO_TRC_HVM_PF_INJECT64 DEFAULT_HVM_PF
+#define DO_TRC_HVM_INJ_EXC DEFAULT_HVM_INJECT
+#define DO_TRC_HVM_INJ_VIRQ DEFAULT_HVM_INJECT
+#define DO_TRC_HVM_REINJ_VIRQ DEFAULT_HVM_INJECT
+#define DO_TRC_HVM_IO_READ DEFAULT_HVM_IO
+#define DO_TRC_HVM_IO_WRITE DEFAULT_HVM_IO
+#define DO_TRC_HVM_CR_READ DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_CR_READ64 DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_CR_WRITE DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_CR_WRITE64 DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_DR_READ DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_DR_WRITE DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_MSR_READ DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_MSR_WRITE DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_CPUID DEFAULT_HVM_MISC
+#define DO_TRC_HVM_INTR DEFAULT_HVM_INTR
+#define DO_TRC_HVM_NMI DEFAULT_HVM_INTR
+#define DO_TRC_HVM_MCE DEFAULT_HVM_INTR
+#define DO_TRC_HVM_SMI DEFAULT_HVM_INTR
+#define DO_TRC_HVM_VMMCALL DEFAULT_HVM_MISC
+#define DO_TRC_HVM_HLT DEFAULT_HVM_MISC
+#define DO_TRC_HVM_INVLPG DEFAULT_HVM_MISC
+#define DO_TRC_HVM_INVLPG64 DEFAULT_HVM_MISC
+#define DO_TRC_HVM_IO_ASSIST DEFAULT_HVM_MISC
+#define DO_TRC_HVM_MMIO_ASSIST DEFAULT_HVM_MISC
+#define DO_TRC_HVM_CLTS DEFAULT_HVM_MISC
+#define DO_TRC_HVM_LMSW DEFAULT_HVM_MISC
+#define DO_TRC_HVM_LMSW64 DEFAULT_HVM_MISC
-static inline void hvmtrace_vmexit(struct vcpu *v,
- unsigned long rip,
- unsigned long exit_reason)
-{
- if ( likely(!tb_init_done) )
- return;
#ifdef __x86_64__
- if ( hvm_long_mode_enabled(v) )
- {
- struct {
- unsigned did:16, vid:16;
- unsigned exit_reason:32;
- u64 rip;
- } d;
-
- d.did = v->domain->domain_id;
- d.vid = v->vcpu_id;
- d.exit_reason = exit_reason;
- d.rip = rip;
- __trace_var(TRC_HVM_VMEXIT64, 1/*cycles*/, sizeof(d),
- (unsigned char *)&d);
- }
- else
+#define TRC_PAR_LONG(par) ((par)&0xFFFFFFFF),((par)>>32)
+#else
+#define TRC_PAR_LONG(par) (par)
#endif
- {
- struct {
- unsigned did:16, vid:16;
- unsigned exit_reason:32;
- u32 eip;
- } d;
-
- d.did = v->domain->domain_id;
- d.vid = v->vcpu_id;
- d.exit_reason = exit_reason;
- d.eip = rip;
- __trace_var(TRC_HVM_VMEXIT, 1/*cycles*/, sizeof(d),
- (unsigned char *)&d);
- }
-}
-
-
-static inline void hvmtrace_vmentry(struct vcpu *v)
-{
- struct {
- unsigned did:16, vid:16;
- } d;
-
- if ( likely(!tb_init_done) )
- return;
-
- d.did = v->domain->domain_id;
- d.vid = v->vcpu_id;
- __trace_var(TRC_HVM_VMENTRY, 1/*cycles*/, sizeof(d), (unsigned char *)&d);
-}
-
-static inline void hvmtrace_msr_read(struct vcpu *v, u32 ecx, u64 msr_content)
-{
- struct {
- unsigned did:16, vid:16;
- u32 ecx;
- u64 msr_content;
- } d;
-
- if ( likely(!tb_init_done) )
- return;
-
- d.did = v->domain->domain_id;
- d.vid = v->vcpu_id;
- d.ecx = ecx;
- d.msr_content = msr_content;
- __trace_var(TRC_HVM_MSR_READ, 0/*!cycles*/, sizeof(d),
- (unsigned char *)&d);
-}
-
-static inline void hvmtrace_msr_write(struct vcpu *v, u32 ecx, u64 msr_content)
-{
- struct {
- unsigned did:16, vid:16;
- u32 ecx;
- u64 msr_content;
- } d;
-
- if ( likely(!tb_init_done) )
- return;
- d.did = v->domain->domain_id;
- d.vid = v->vcpu_id;
- d.ecx = ecx;
- d.msr_content = msr_content;
- __trace_var(TRC_HVM_MSR_WRITE, 0/*!cycles*/,sizeof(d),
- (unsigned char *)&d);
-}
-
-static inline void hvmtrace_pf_xen(struct vcpu *v, unsigned long va,
- u32 error_code)
-{
- if ( likely(!tb_init_done) )
- return;
-
-#ifdef __x86_64__
- if( hvm_long_mode_enabled(v) )
- {
- struct {
- unsigned did:16, vid:16;
- u32 error_code;
- u64 va;
- } d;
- d.did = v->domain->domain_id;
- d.vid = v->vcpu_id;
- d.error_code = error_code;
- d.va = va;
- __trace_var(TRC_HVM_PF_XEN64, 0/*!cycles*/,sizeof(d),
- (unsigned char *)&d);
- }
- else
-#endif
- {
- struct {
- unsigned did:16, vid:16;
- u32 error_code;
- u32 va;
- } d;
- d.did = v->domain->domain_id;
- d.vid = v->vcpu_id;
- d.error_code = error_code;
- d.va = va;
- __trace_var(TRC_HVM_PF_XEN, 0/*!cycles*/,sizeof(d),
- (unsigned char *)&d);
- }
-}
-
-#define HVMTRACE_ND(evt, vcpu, count, d1, d2, d3, d4) \
+#define HVMTRACE_ND(evt, cycles, vcpu, count, d1, d2, d3, d4, d5, d6) \
do { \
if ( unlikely(tb_init_done) && DO_TRC_HVM_ ## evt ) \
{ \
struct { \
- unsigned did:16, vid:16; \
- u32 d[4]; \
+ u32 did:16, vid:16; \
+ u32 d[6]; \
} _d; \
_d.did=(vcpu)->domain->domain_id; \
_d.vid=(vcpu)->vcpu_id; \
_d.d[1]=(d2); \
_d.d[2]=(d3); \
_d.d[3]=(d4); \
- __trace_var(TRC_HVM_ ## evt, 0/*!cycles*/, \
+ _d.d[4]=(d5); \
+ _d.d[5]=(d6); \
+ __trace_var(TRC_HVM_ ## evt, cycles, \
sizeof(u32)*count+1, (unsigned char *)&_d); \
} \
} while(0)
-#define HVMTRACE_4D(evt, vcpu, d1, d2, d3, d4) HVMTRACE_ND(evt, vcpu, 4, d1, d2, d3, d4)
-#define HVMTRACE_3D(evt, vcpu, d1, d2, d3) HVMTRACE_ND(evt, vcpu, 3, d1, d2, d3, 0)
-#define HVMTRACE_2D(evt, vcpu, d1, d2) HVMTRACE_ND(evt, vcpu, 2, d1, d2, 0, 0)
-#define HVMTRACE_1D(evt, vcpu, d1) HVMTRACE_ND(evt, vcpu, 1, d1, 0, 0, 0)
-#define HVMTRACE_0D(evt, vcpu) HVMTRACE_ND(evt, vcpu, 0, 0, 0, 0, 0)
+#define HVMTRACE_6D(evt, vcpu, d1, d2, d3, d4, d5, d6) \
+ HVMTRACE_ND(evt, 0, vcpu, 6, d1, d2, d3, d4, d5, d6)
+#define HVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5) \
+ HVMTRACE_ND(evt, 0, vcpu, 5, d1, d2, d3, d4, d5, 0)
+#define HVMTRACE_4D(evt, vcpu, d1, d2, d3, d4) \
+ HVMTRACE_ND(evt, 0, vcpu, 4, d1, d2, d3, d4, 0, 0)
+#define HVMTRACE_3D(evt, vcpu, d1, d2, d3) \
+ HVMTRACE_ND(evt, 0, vcpu, 3, d1, d2, d3, 0, 0, 0)
+#define HVMTRACE_2D(evt, vcpu, d1, d2) \
+ HVMTRACE_ND(evt, 0, vcpu, 2, d1, d2, 0, 0, 0, 0)
+#define HVMTRACE_1D(evt, vcpu, d1) \
+ HVMTRACE_ND(evt, 0, vcpu, 1, d1, 0, 0, 0, 0, 0)
+#define HVMTRACE_0D(evt, vcpu) \
+ HVMTRACE_ND(evt, 0, vcpu, 0, 0, 0, 0, 0, 0, 0)
+
+
+
+#ifdef __x86_64__
+#define HVMTRACE_LONG_1D(evt, vcpu, d1) \
+ HVMTRACE_2D(evt ## 64, vcpu, (d1) & 0xFFFFFFFF, (d1) >> 32)
+#define HVMTRACE_LONG_2D(evt,vcpu,d1,d2, ...) \
+ HVMTRACE_3D(evt ## 64, vcpu, d1, d2)
+#define HVMTRACE_LONG_3D(evt, vcpu, d1, d2, d3, ...) \
+ HVMTRACE_4D(evt ## 64, vcpu, d1, d2, d3)
+#define HVMTRACE_LONG_4D(evt, vcpu, d1, d2, d3, d4, ...) \
+ HVMTRACE_5D(evt ## 64, vcpu, d1, d2, d3, d4)
+#else
+#define HVMTRACE_LONG_1D HVMTRACE_1D
+#define HVMTRACE_LONG_2D HVMTRACE_2D
+#define HVMTRACE_LONG_3D HVMTRACE_3D
+#define HVMTRACE_LONG_4D HVMTRACE_4D
+#endif
#endif /* __ASM_X86_HVM_TRACE_H__ */
#define TRC_PV_PTWR_EMULATION (TRC_PV + 11)
#define TRC_PV_PTWR_EMULATION_PAE (TRC_PV + 12)
/* Indicates that addresses in trace record are 64 bits */
-#define TRC_PV_64_FLAG (0x100)
+#define TRC_64_FLAG (0x100)
/* trace events per subclass */
#define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01)
#define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02)
-#define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + 0x03)
+#define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02)
#define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01)
+#define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x01)
#define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02)
+#define TRC_HVM_PF_INJECT64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x02)
#define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03)
#define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04)
#define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05)
#define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06)
#define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07)
#define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08)
+#define TRC_HVM_CR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x08)
#define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09)
+#define TRC_HVM_CR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x09)
#define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A)
#define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B)
#define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C)
#define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12)
#define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13)
#define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14)
+#define TRC_HVM_INVLPG64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x14)
#define TRC_HVM_MCE (TRC_HVM_HANDLER + 0x15)
#define TRC_HVM_IO_ASSIST (TRC_HVM_HANDLER + 0x16)
#define TRC_HVM_MMIO_ASSIST (TRC_HVM_HANDLER + 0x17)
#define TRC_HVM_CLTS (TRC_HVM_HANDLER + 0x18)
#define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19)
-#define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + 0x20)
+#define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
/* This structure represents a single trace buffer record. */
struct t_rec {