xen: sched: make the 'tickled' perf counter clearer
Author: Dario Faggioli <dario.faggioli@citrix.com>
Wed, 6 Jul 2016 15:54:02 +0000 (16:54 +0100)
Committer: George Dunlap <george.dunlap@citrix.com>
Fri, 8 Jul 2016 10:04:48 +0000 (11:04 +0100)
In fact, what we have right now, i.e., tickle_idlers_none
and tickle_idlers_some, is not good enough for describing
what really happens in the various tickling functions of
the various schedulers.

Switch to a more descriptive set of counters, such as:
 - tickled_no_cpu: for when we don't tickle anyone
 - tickled_idle_cpu: for when we tickle one or more
                     idlers
 - tickled_busy_cpu: for when we tickle one or more
                     non-idlers

While there, fix style of an "out:" label in sched_rt.c.

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Reviewed-by: Meng Xu <mengxu@cis.upenn.edu>
Acked-by: George Dunlap <george.dunlap@citrix.com>
xen/common/sched_credit.c
xen/common/sched_credit2.c
xen/common/sched_rt.c
xen/include/xen/perfc_defn.h

index a38a63d9aa16f596d43853cf9f6f801bdbc8d47d..ac227461776d77edd2f51d37be32c0d3e5241536 100644 (file)
@@ -385,7 +385,9 @@ static inline void __runq_tickle(struct csched_vcpu *new)
          || (idlers_empty && new->pri > cur->pri) )
     {
         if ( cur->pri != CSCHED_PRI_IDLE )
-            SCHED_STAT_CRANK(tickle_idlers_none);
+            SCHED_STAT_CRANK(tickled_busy_cpu);
+        else
+            SCHED_STAT_CRANK(tickled_idle_cpu);
         __cpumask_set_cpu(cpu, &mask);
     }
     else if ( !idlers_empty )
@@ -444,13 +446,13 @@ static inline void __runq_tickle(struct csched_vcpu *new)
                     set_bit(_VPF_migrating, &cur->vcpu->pause_flags);
                 }
                 /* Tickle cpu anyway, to let new preempt cur. */
-                SCHED_STAT_CRANK(tickle_idlers_none);
+                SCHED_STAT_CRANK(tickled_busy_cpu);
                 __cpumask_set_cpu(cpu, &mask);
             }
             else if ( !new_idlers_empty )
             {
                 /* Which of the idlers suitable for new shall we wake up? */
-                SCHED_STAT_CRANK(tickle_idlers_some);
+                SCHED_STAT_CRANK(tickled_idle_cpu);
                 if ( opt_tickle_one_idle )
                 {
                     this_cpu(last_tickle_cpu) =
@@ -479,6 +481,8 @@ static inline void __runq_tickle(struct csched_vcpu *new)
         /* Send scheduler interrupts to designated CPUs */
         cpumask_raise_softirq(&mask, SCHEDULE_SOFTIRQ);
     }
+    else
+        SCHED_STAT_CRANK(tickled_no_cpu);
 }
 
 static void
index 1933ff15a2f7eac4f853c7583ef114766c7b18d9..ebad7347a12b25bccd4f0198f2c21a8b0b2f4d25 100644 (file)
@@ -589,6 +589,7 @@ runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched2_vcpu *
     i = cpumask_cycle(cpu, &mask);
     if ( i < nr_cpu_ids )
     {
+        SCHED_STAT_CRANK(tickled_idle_cpu);
         ipid = i;
         goto tickle;
     }
@@ -637,11 +638,12 @@ runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched2_vcpu *
      * than the migrate resistance */
     if ( ipid == -1 || lowest + CSCHED2_MIGRATE_RESIST > new->credit )
     {
-        SCHED_STAT_CRANK(tickle_idlers_none);
-        goto no_tickle;
+        SCHED_STAT_CRANK(tickled_no_cpu);
+        return;
     }
 
-tickle:
+    SCHED_STAT_CRANK(tickled_busy_cpu);
+ tickle:
     BUG_ON(ipid == -1);
 
     /* TRACE */ {
@@ -654,11 +656,7 @@ tickle:
                   (unsigned char *)&d);
     }
     cpumask_set_cpu(ipid, &rqd->tickled);
-    SCHED_STAT_CRANK(tickle_idlers_some);
     cpu_raise_softirq(ipid, SCHEDULE_SOFTIRQ);
-
-no_tickle:
-    return;
 }
 
 /*
index 8c45251ff4f1153814882a00cd5fbb9443111e33..98524a63bc84a5bb9f84b50239768c047cdbb7e3 100644 (file)
@@ -1146,6 +1146,7 @@ runq_tickle(const struct scheduler *ops, struct rt_vcpu *new)
     /* 1) if new's previous cpu is idle, kick it for cache benefit */
     if ( is_idle_vcpu(curr_on_cpu(new->vcpu->processor)) )
     {
+        SCHED_STAT_CRANK(tickled_idle_cpu);
         cpu_to_tickle = new->vcpu->processor;
         goto out;
     }
@@ -1157,6 +1158,7 @@ runq_tickle(const struct scheduler *ops, struct rt_vcpu *new)
         iter_vc = curr_on_cpu(cpu);
         if ( is_idle_vcpu(iter_vc) )
         {
+            SCHED_STAT_CRANK(tickled_idle_cpu);
             cpu_to_tickle = cpu;
             goto out;
         }
@@ -1170,14 +1172,15 @@ runq_tickle(const struct scheduler *ops, struct rt_vcpu *new)
     if ( latest_deadline_vcpu != NULL &&
          new->cur_deadline < latest_deadline_vcpu->cur_deadline )
     {
+        SCHED_STAT_CRANK(tickled_busy_cpu);
         cpu_to_tickle = latest_deadline_vcpu->vcpu->processor;
         goto out;
     }
 
     /* didn't tickle any cpu */
-    SCHED_STAT_CRANK(tickle_idlers_none);
+    SCHED_STAT_CRANK(tickled_no_cpu);
     return;
-out:
+ out:
     /* TRACE */
     {
         struct {
@@ -1191,7 +1194,6 @@ out:
     }
 
     cpumask_set_cpu(cpu_to_tickle, &prv->tickled);
-    SCHED_STAT_CRANK(tickle_idlers_some);
     cpu_raise_softirq(cpu_to_tickle, SCHEDULE_SOFTIRQ);
     return;
 }
index 21c1e0b54dd6f569dd6b3d617a0cf1f7293af254..a336c71de85d5758753be65656f907c0a7862561 100644 (file)
@@ -27,8 +27,9 @@ PERFCOUNTER(vcpu_wake_running,      "sched: vcpu_wake_running")
 PERFCOUNTER(vcpu_wake_onrunq,       "sched: vcpu_wake_onrunq")
 PERFCOUNTER(vcpu_wake_runnable,     "sched: vcpu_wake_runnable")
 PERFCOUNTER(vcpu_wake_not_runnable, "sched: vcpu_wake_not_runnable")
-PERFCOUNTER(tickle_idlers_none,     "sched: tickle_idlers_none")
-PERFCOUNTER(tickle_idlers_some,     "sched: tickle_idlers_some")
+PERFCOUNTER(tickled_no_cpu,         "sched: tickled_no_cpu")
+PERFCOUNTER(tickled_idle_cpu,       "sched: tickled_idle_cpu")
+PERFCOUNTER(tickled_busy_cpu,       "sched: tickled_busy_cpu")
 PERFCOUNTER(vcpu_check,             "sched: vcpu_check")
 
 /* credit specific counters */