It is unclear why this was originally done for PV only.
While at it, limit this statistics collection to debug or
performance-counter-enabled builds.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
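A note on the new _hvm_rdtsc_intercept() below: the HVM side derives the
kernel/user split from SS.DPL (which equals the CPL), and places case
labels inside the DPL check so that vm86 mode, for which
hvm_guest_x86_mode() returns 1, shares the user-count path, while real
mode (0) shares the kernel path. A minimal standalone sketch of that
switch construct, with invented names (classify() is not part of the
patch):

    #include <stdio.h>

    /* Illustrative only: mode mirrors hvm_guest_x86_mode() (0 = real,
     * 1 = vm86, 2/4/8 = 16/32/64-bit); dpl mirrors SS.DPL. */
    static void classify(unsigned int mode, unsigned int dpl,
                         unsigned long *kern, unsigned long *user)
    {
        switch ( mode )
        {
        case 8:
        case 4:
        case 2:
            if ( dpl )              /* non-zero SS.DPL => user mode */
            {
        case 1:                     /* vm86: always user */
                ++*user;
                break;
            }
            /* fall through */
        case 0:                     /* real mode: counted as kernel */
            ++*kern;
            break;
        }
    }

    int main(void)
    {
        unsigned long kern = 0, user = 0;

        classify(8, 3, &kern, &user);   /* 64-bit, CPL 3 -> user   */
        classify(4, 0, &kern, &user);   /* 32-bit, CPL 0 -> kernel */
        classify(1, 0, &kern, &user);   /* vm86          -> user   */
        classify(0, 0, &kern, &user);   /* real mode     -> kernel */
        printf("kernel=%lu user=%lu\n", kern, user); /* kernel=2 user=2 */
        return 0;
    }

Jumping to a case label nested inside a block is legal C (the same
property Duff's device relies on); it lets all five mode values share the
two counting paths without duplicating code.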
     {
         tsc = hvm_get_guest_time_fixed(v, at_tsc);
         tsc = gtime_to_gtsc(v->domain, tsc);
-        v->domain->arch.vtsc_kerncount++;
     }
     else if ( at_tsc )
     {
         tsc = at_tsc;
     }
 }
+static uint64_t _hvm_rdtsc_intercept(void)
+{
+    struct vcpu *curr = current;
+#if !defined(NDEBUG) || defined(PERF_COUNTERS)
+    struct domain *currd = curr->domain;
+
+    if ( currd->arch.vtsc )
+        switch ( hvm_guest_x86_mode(curr) )
+        {
+            struct segment_register sreg;
+
+        case 8: /* 64-bit */
+        case 4: /* 32-bit */
+        case 2: /* 16-bit protected */
+            hvm_get_segment_register(curr, x86_seg_ss, &sreg);
+            /* SS.DPL equals the CPL: non-zero means user mode. */
+            if ( unlikely(sreg.attr.fields.dpl) )
+            {
+        case 1: /* vm86 is always user */
+                currd->arch.vtsc_usercount++;
+                break;
+            }
+            /* fall through */
+        case 0: /* real mode counts as kernel */
+            currd->arch.vtsc_kerncount++;
+            break;
+        }
+#endif
+
+    return hvm_get_guest_tsc(curr);
+}
+
 void hvm_rdtsc_intercept(struct cpu_user_regs *regs)
 {
-    uint64_t tsc;
-    struct vcpu *v = current;
+    uint64_t tsc = _hvm_rdtsc_intercept();
 
-    tsc = hvm_get_guest_tsc(v);
     regs->eax = (uint32_t)tsc;
     regs->edx = (uint32_t)(tsc >> 32);
         break;
     case MSR_IA32_TSC:
-        *msr_content = hvm_get_guest_tsc(v);
+        *msr_content = _hvm_rdtsc_intercept();
         break;
     case MSR_IA32_TSC_ADJUST:
     spin_lock(&d->arch.vtsc_lock);
 
+#if !defined(NDEBUG) || defined(PERF_COUNTERS)
     if ( guest_kernel_mode(v, regs) )
         d->arch.vtsc_kerncount++;
     else
         d->arch.vtsc_usercount++;
+#endif
 
     if ( (int64_t)(now - d->arch.vtsc_last) > 0 )
         d->arch.vtsc_last = now;
printk(",khz=%"PRIu32, d->arch.tsc_khz);
if ( d->arch.incarnation )
printk(",inc=%"PRIu32, d->arch.incarnation);
+#if !defined(NDEBUG) || defined(PERF_COUNTERS)
if ( !(d->arch.vtsc_kerncount | d->arch.vtsc_usercount) )
- {
printk("\n");
- continue;
- }
- if ( is_hvm_domain(d) )
- printk(",vtsc count: %"PRIu64" total\n",
- d->arch.vtsc_kerncount);
else
printk(",vtsc count: %"PRIu64" kernel, %"PRIu64" user\n",
d->arch.vtsc_kerncount, d->arch.vtsc_usercount);
+#endif
domcnt++;
}
     struct time_scale ns_to_vtsc; /* scaling for certain emulated cases */
     uint32_t incarnation;    /* incremented every restore or live migrate
                                 (possibly other cases in the future */
-    uint64_t vtsc_kerncount; /* for hvm, counts all vtsc */
-    uint64_t vtsc_usercount; /* not used for hvm */
+#if !defined(NDEBUG) || defined(PERF_COUNTERS)
+    uint64_t vtsc_kerncount;
+    uint64_t vtsc_usercount;
+#endif
 
     /* Pseudophysical e820 map (XENMEM_memory_map). */
     spinlock_t e820_lock;
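A remark on the build gating: because the vtsc_kerncount / vtsc_usercount
fields themselves are compiled out of release builds without perf
counters, any access left unguarded fails to build, which is why the
pv_soft_rdtsc() and dump_softtsc() hunks above gain matching #if guards.
A minimal sketch of the pattern, with invented names (struct stats and
count() are not from the patch):

    #include <stdint.h>

    struct stats {
        uint64_t total;             /* always present */
    #if !defined(NDEBUG) || defined(PERF_COUNTERS)
        uint64_t kern, user;        /* absent in NDEBUG && !PERF_COUNTERS */
    #endif
    };

    static inline void count(struct stats *s, int user_mode)
    {
        s->total++;
    #if !defined(NDEBUG) || defined(PERF_COUNTERS)
        if ( user_mode )            /* gated exactly like the fields */
            s->user++;
        else
            s->kern++;
    #endif
    }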