__trace_var(event, 1/*tsc*/, sizeof(d), (unsigned char *)&d);
}
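+/*
+ * Emit a TRC_SCHED_CONTINUE_RUNNING record when the scheduler decides to
+ * leave the current vcpu running on this CPU.
+ */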
+static inline void trace_continue_running(struct vcpu *v)
+{
+ struct { uint32_t vcpu:16, domain:16; } d;
+
+ if ( likely(!tb_init_done) )
+ return;
+
+ d.vcpu = v->vcpu_id;
+ d.domain = v->domain->domain_id;
+
+ __trace_var(TRC_SCHED_CONTINUE_RUNNING, 1/*tsc*/, sizeof(d),
+ (unsigned char *)&d);
+}
+
static inline void vcpu_runstate_change(
struct vcpu *v, int new_state, s_time_t new_entry_time)
{
if ( unlikely(prev == next) )
{
spin_unlock_irq(&sd->schedule_lock);
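+ /* Same vcpu selected again: trace it before returning to the guest. */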
+ trace_continue_running(next);
return continue_running(prev);
}
#define TRC_TRACE_WRAP_BUFFER (TRC_GEN + 2)
#define TRC_TRACE_CPU_CHANGE (TRC_GEN + 3)
-#define TRC_SCHED_RUNSTATE_CHANGE (TRC_SCHED_MIN + 1)
+#define TRC_SCHED_RUNSTATE_CHANGE (TRC_SCHED_MIN + 1)
+#define TRC_SCHED_CONTINUE_RUNNING (TRC_SCHED_MIN + 2)
#define TRC_SCHED_DOM_ADD (TRC_SCHED_VERBOSE + 1)
#define TRC_SCHED_DOM_REM (TRC_SCHED_VERBOSE + 2)
#define TRC_SCHED_SLEEP (TRC_SCHED_VERBOSE + 3)