|| (idlers_empty && new->pri > cur->pri) )
{
if ( cur->pri != CSCHED_PRI_IDLE )
- SCHED_STAT_CRANK(tickle_idlers_none);
+ SCHED_STAT_CRANK(tickled_busy_cpu);
+ else
+ SCHED_STAT_CRANK(tickled_idle_cpu);
__cpumask_set_cpu(cpu, &mask);
}
else if ( !idlers_empty )
set_bit(_VPF_migrating, &cur->vcpu->pause_flags);
}
/* Tickle cpu anyway, to let new preempt cur. */
- SCHED_STAT_CRANK(tickle_idlers_none);
+ SCHED_STAT_CRANK(tickled_busy_cpu);
__cpumask_set_cpu(cpu, &mask);
}
else if ( !new_idlers_empty )
{
/* Which of the idlers suitable for new shall we wake up? */
- SCHED_STAT_CRANK(tickle_idlers_some);
+ SCHED_STAT_CRANK(tickled_idle_cpu);
if ( opt_tickle_one_idle )
{
this_cpu(last_tickle_cpu) =
    cpumask_cycle(this_cpu(last_tickle_cpu), &idle_mask);
__cpumask_set_cpu(this_cpu(last_tickle_cpu), &mask);
}
}
if ( !cpumask_empty(&mask) )
{
/* Send scheduler interrupts to designated CPUs */
cpumask_raise_softirq(&mask, SCHEDULE_SOFTIRQ);
}
+ else
+ SCHED_STAT_CRANK(tickled_no_cpu);
}
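
With this change, the credit1 counters record what actually got tickled rather than whether idlers happened to exist: tickled_idle_cpu when an idle pcpu is woken for new, tickled_busy_cpu when a busy pcpu is poked so that new can preempt cur, and tickled_no_cpu when no softirq is raised at all. A condensed sketch of the resulting shape (the affinity/balancing logic is elided and tickle_this_cpu is a placeholder condition, so this is illustrative, not the exact code):

    cpumask_clear(&mask);
    if ( tickle_this_cpu )                       /* placeholder condition */
    {
        if ( cur->pri != CSCHED_PRI_IDLE )
            SCHED_STAT_CRANK(tickled_busy_cpu);  /* cur will be preempted */
        else
            SCHED_STAT_CRANK(tickled_idle_cpu);  /* cpu was idling anyway */
        __cpumask_set_cpu(cpu, &mask);
    }
    /* ... possibly add suitable idlers to mask, as above ... */
    if ( !cpumask_empty(&mask) )
        cpumask_raise_softirq(&mask, SCHEDULE_SOFTIRQ);
    else
        SCHED_STAT_CRANK(tickled_no_cpu);        /* nothing tickled at all */
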
static void
i = cpumask_cycle(cpu, &mask);
if ( i < nr_cpu_ids )
{
+ SCHED_STAT_CRANK(tickled_idle_cpu);
ipid = i;
goto tickle;
}
/* Only switch to another processor if the credit difference is greater
 * than the migrate resistance */
if ( ipid == -1 || lowest + CSCHED2_MIGRATE_RESIST > new->credit )
{
- SCHED_STAT_CRANK(tickle_idlers_none);
- goto no_tickle;
+ SCHED_STAT_CRANK(tickled_no_cpu);
+ return;
}
-tickle:
+ SCHED_STAT_CRANK(tickled_busy_cpu);
+ tickle:
BUG_ON(ipid == -1);
/* TRACE */ {
(unsigned char *)&d);
}
cpumask_set_cpu(ipid, &rqd->tickled);
- SCHED_STAT_CRANK(tickle_idlers_some);
cpu_raise_softirq(ipid, SCHEDULE_SOFTIRQ);
-
-no_tickle:
- return;
}
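
Note where the busy-cpu crank lands in the credit2 hunk: on the fall-through path only, immediately before the tickle: label, so the earlier goto tickle paths, which have already cranked tickled_idle_cpu, never count twice. Replacing the old no_tickle label with a plain return is also what lets the trailing no_tickle:/return lines go away. Schematically (found_suitable_idler is a placeholder for the real idler checks):

    if ( found_suitable_idler )              /* e.g. the cpumask_cycle() hit */
    {
        SCHED_STAT_CRANK(tickled_idle_cpu);
        goto tickle;                         /* jumps over the busy-cpu crank */
    }
    if ( ipid == -1 || lowest + CSCHED2_MIGRATE_RESIST > new->credit )
    {
        SCHED_STAT_CRANK(tickled_no_cpu);    /* not worth tickling anyone */
        return;
    }
    SCHED_STAT_CRANK(tickled_busy_cpu);      /* about to preempt a busy cpu */
     tickle:
        /* ... raise SCHEDULE_SOFTIRQ on ipid ... */
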
/* 1) if new's previous cpu is idle, kick it for cache benefit */
if ( is_idle_vcpu(curr_on_cpu(new->vcpu->processor)) )
{
+ SCHED_STAT_CRANK(tickled_idle_cpu);
cpu_to_tickle = new->vcpu->processor;
goto out;
}
iter_vc = curr_on_cpu(cpu);
if ( is_idle_vcpu(iter_vc) )
{
+ SCHED_STAT_CRANK(tickled_idle_cpu);
cpu_to_tickle = cpu;
goto out;
}
if ( latest_deadline_vcpu != NULL &&
new->cur_deadline < latest_deadline_vcpu->cur_deadline )
{
+ SCHED_STAT_CRANK(tickled_busy_cpu);
cpu_to_tickle = latest_deadline_vcpu->vcpu->processor;
goto out;
}
/* didn't tickle any cpu */
- SCHED_STAT_CRANK(tickle_idlers_none);
+ SCHED_STAT_CRANK(tickled_no_cpu);
return;
-out:
+ out:
/* TRACE */
{
struct {
}
cpumask_set_cpu(cpu_to_tickle, &prv->tickled);
- SCHED_STAT_CRANK(tickle_idlers_some);
cpu_raise_softirq(cpu_to_tickle, SCHEDULE_SOFTIRQ);
return;
}
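
The sched_rt.c hunk completes the same partitioning: every exit of runq_tickle() now cranks exactly one of the three counters. Flattening the goto-based flow above into one chain gives (conditions paraphrased; the real code takes the idle cases via early goto out):

    if ( prev_cpu_is_idle || some_affine_cpu_is_idle )   /* paraphrased */
        SCHED_STAT_CRANK(tickled_idle_cpu);
    else if ( latest_deadline_vcpu != NULL &&
              new->cur_deadline < latest_deadline_vcpu->cur_deadline )
        SCHED_STAT_CRANK(tickled_busy_cpu);              /* preempt it */
    else
        SCHED_STAT_CRANK(tickled_no_cpu);                /* tickle nothing */
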
PERFCOUNTER(vcpu_wake_onrunq, "sched: vcpu_wake_onrunq")
PERFCOUNTER(vcpu_wake_runnable, "sched: vcpu_wake_runnable")
PERFCOUNTER(vcpu_wake_not_runnable, "sched: vcpu_wake_not_runnable")
-PERFCOUNTER(tickle_idlers_none, "sched: tickle_idlers_none")
-PERFCOUNTER(tickle_idlers_some, "sched: tickle_idlers_some")
+PERFCOUNTER(tickled_no_cpu, "sched: tickled_no_cpu")
+PERFCOUNTER(tickled_idle_cpu, "sched: tickled_idle_cpu")
+PERFCOUNTER(tickled_busy_cpu, "sched: tickled_busy_cpu")
PERFCOUNTER(vcpu_check, "sched: vcpu_check")
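
For context on this perfc_defn.h hunk: PERFCOUNTER() declares a named software counter, and the SCHED_STAT_CRANK() calls in the scheduler hunks above are what bump it. A minimal sketch of the wiring, assuming the sched-if.h/perfc.h definitions of this era (paraphrased from memory, not copied from the tree):

    /* xen/include/xen/sched-if.h: cranking is just a perf counter bump,
     * compiled out on builds without perf counters. */
    #define SCHED_STAT_CRANK(_X)  (perfc_incr(_X))

    /* xen/include/xen/perfc.h re-includes perfc_defn.h with varying
     * PERFCOUNTER() definitions, e.g. to enumerate counter indices: */
    #define PERFCOUNTER(_name, _desc)  PERFC_ ## _name,

On a perfc-enabled (debug) build, the renamed counters can then be read with the xenperf tool, under the "sched:" descriptions given above.
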
/* credit specific counters */