Avoid negative runstate pieces.
author     Keir Fraser <keir.fraser@citrix.com>
           Wed, 10 Dec 2008 14:05:41 +0000 (14:05 +0000)
committer  Keir Fraser <keir.fraser@citrix.com>
           Wed, 10 Dec 2008 14:05:41 +0000 (14:05 +0000)
A runstate time piece is computed as a later time stamp minus the recorded
state entry time; since the two stamps may be taken on different CPUs whose
local clocks are not perfectly synchronized, the difference can come out
negative, so skip accumulating it in that case rather than letting the
unsigned counters step backwards.

Also consolidate all places that read per-CPU idle time into a single
get_cpu_idle_time() helper.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
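
(Editorial note, not part of the patch: runstate times are unsigned 64-bit
nanosecond counters, and a time stamp read on one CPU can lag a
state_entry_time recorded on another, so an unguarded update lets a counter
step backwards, and any consumer that diffs two samples in unsigned math
then wraps to a huge bogus value. A minimal standalone C illustration of
the failure mode, with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t s_time_t;              /* Xen's signed time type (ns) */

    int main(void)
    {
        uint64_t bucket     = 1000000;     /* accumulated runstate time   */
        uint64_t entry_time = 5000000;     /* stamped on another CPU      */
        uint64_t now        = 4999000;     /* local clock lags by 1000ns  */

        /* Unguarded update: the counter moves backwards by 1000ns. */
        uint64_t unguarded = bucket + (now - entry_time);

        /* Guarded update, as in the patched vcpu_runstate_change(). */
        uint64_t guarded = bucket;
        s_time_t delta = (s_time_t)(now - entry_time);
        if ( delta > 0 )
            guarded += delta;

        /* A consumer diffing two samples in unsigned math wraps. */
        uint64_t consumer_delta = unguarded - bucket;  /* ~2^64, bogus */

        printf("unguarded: %llu  guarded: %llu  consumer delta: %llu\n",
               (unsigned long long)unguarded, (unsigned long long)guarded,
               (unsigned long long)consumer_delta);
        return 0;
    }
)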
xen/arch/x86/acpi/cpu_idle.c
xen/arch/x86/platform_hypercall.c
xen/common/schedule.c
xen/common/sysctl.c
xen/drivers/cpufreq/cpufreq_ondemand.c
xen/include/xen/sched.h

diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c
index 3f7b16bfdd75b7f9bf804fbacf5a1e50c6d1ee10..bf88f14a78b8549bdda3866d8f560af8368a4c12 100644
@@ -749,7 +749,6 @@ uint32_t pmstat_get_cx_nr(uint32_t cpuid)
 int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat)
 {
     const struct acpi_processor_power *power = processor_powers[cpuid];
-    struct vcpu *v = idle_vcpu[cpuid];
     uint64_t usage;
     int i;
 
@@ -763,9 +762,7 @@ int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat)
 
     stat->last = power->last_state ? power->last_state->idx : 0;
     stat->nr = power->count;
-    stat->idle_time = v->runstate.time[RUNSTATE_running];
-    if ( v->is_running )
-        stat->idle_time += NOW() - v->runstate.state_entry_time;
+    stat->idle_time = get_cpu_idle_time(cpuid);
 
     for ( i = 0; i < power->count; i++ )
     {
diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c
index 28ab32678bf318e47f73b3d114dfa0ff61d23ab9..4bf677792325d6af92f055d4b18c96b780554dbf 100644
@@ -337,16 +337,8 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
         for_each_cpu_mask ( cpu, cpumap )
         {
-            if ( (v = idle_vcpu[cpu]) != NULL )
-            {
-                idletime = v->runstate.time[RUNSTATE_running];
-                if ( v->is_running )
-                    idletime += now - v->runstate.state_entry_time;
-            }
-            else
-            {
-                idletime = 0;
-                cpu_clear(cpu, cpumap);
-            }
+            if ( (v = idle_vcpu[cpu]) == NULL )
+                cpu_clear(cpu, cpumap);
+            idletime = get_cpu_idle_time(cpu);
 
             ret = -EFAULT;
             if ( copy_to_guest_offset(idletimes, cpu, &idletime, 1) )
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 04b09e2168841150f2072543aa06dc37ff6ed792..872bcf397f3d45fb1a43cc6bc3f4a97a1fb3d18f 100644
@@ -84,33 +84,49 @@ static inline void trace_runstate_change(struct vcpu *v, int new_state)
 static inline void vcpu_runstate_change(
     struct vcpu *v, int new_state, s_time_t new_entry_time)
 {
+    s_time_t delta;
+
     ASSERT(v->runstate.state != new_state);
     ASSERT(spin_is_locked(&per_cpu(schedule_data,v->processor).schedule_lock));
 
     trace_runstate_change(v, new_state);
 
-    v->runstate.time[v->runstate.state] +=
-        new_entry_time - v->runstate.state_entry_time;
-    v->runstate.state_entry_time = new_entry_time;
-    v->runstate.state = new_state;
+    delta = new_entry_time - v->runstate.state_entry_time;
+    if ( delta > 0 )
+    {
+        v->runstate.time[v->runstate.state] += delta;
+        v->runstate.state_entry_time = new_entry_time;
+    }
+
+    v->runstate.state = new_state;
 }
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
 {
-    if ( likely(v == current) )
-    {
-        /* Fast lock-free path. */
-        memcpy(runstate, &v->runstate, sizeof(*runstate));
-        ASSERT(runstate->state == RUNSTATE_running);
-        runstate->time[RUNSTATE_running] += NOW() - runstate->state_entry_time;
-    }
-    else
-    {
+    s_time_t delta;
+
+    if ( unlikely(v != current) )
         vcpu_schedule_lock_irq(v);
-        memcpy(runstate, &v->runstate, sizeof(*runstate));
-        runstate->time[runstate->state] += NOW() - runstate->state_entry_time;
+
+    memcpy(runstate, &v->runstate, sizeof(*runstate));
+    delta = NOW() - runstate->state_entry_time;
+    if ( delta > 0 )
+        runstate->time[runstate->state] += delta;
+
+    if ( unlikely(v != current) )
         vcpu_schedule_unlock_irq(v);
-    }
+}
+
+uint64_t get_cpu_idle_time(unsigned int cpu)
+{
+    struct vcpu_runstate_info state = { .state = RUNSTATE_running };
+    struct vcpu *v;
+
+    if ( (v = idle_vcpu[cpu]) == NULL )
+        return 0;
+
+    vcpu_runstate_get(v, &state);
+    return state.time[RUNSTATE_running];
 }
 
 int sched_init_vcpu(struct vcpu *v, unsigned int processor) 
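
(Editorial sketch: the rewritten vcpu_runstate_get() hides the locking,
taking the schedule lock only when v != current, and applies the same
non-negative-delta guard. Since the four RUNSTATE_* buckets partition a
vcpu's lifetime, derived metrics need only one snapshot; the helper name
below is hypothetical:

    /* Sketch: time a vcpu spent deprived of the CPU (runnable or
     * offline), derived from a single consistent runstate snapshot. */
    static uint64_t vcpu_unscheduled_time(struct vcpu *v)
    {
        struct vcpu_runstate_info info;

        vcpu_runstate_get(v, &info);  /* locks internally if v != current */
        return info.time[RUNSTATE_runnable] + info.time[RUNSTATE_offline];
    }
)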
diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
index 9350cfe3c2bdd780e4b8d3687d420e6699237359..93dd2ffe58ec85dbcdf3c37572fef136d4e67706 100644
@@ -167,7 +167,6 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
     {
         uint32_t i, nr_cpus;
         struct xen_sysctl_cpuinfo cpuinfo;
-        struct vcpu *v;
 
         nr_cpus = min_t(uint32_t, op->u.getcpuinfo.max_cpus, NR_CPUS);
 
@@ -177,13 +176,7 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
 
         for ( i = 0; i < nr_cpus; i++ )
         {
-            /* Assume no holes in idle-vcpu map. */
-            if ( (v = idle_vcpu[i]) == NULL )
-                break;
-
-            cpuinfo.idletime = v->runstate.time[RUNSTATE_running];
-            if ( v->is_running )
-                cpuinfo.idletime += NOW() - v->runstate.state_entry_time;
+            cpuinfo.idletime = get_cpu_idle_time(i);
 
             ret = -EFAULT;
             if ( copy_to_guest_offset(op->u.getcpuinfo.info, i, &cpuinfo, 1) )
diff --git a/xen/drivers/cpufreq/cpufreq_ondemand.c b/xen/drivers/cpufreq/cpufreq_ondemand.c
index a34912cbc037ab96b4b75e022ddeff4f33bfbe4d..f5017beda63b5c61343b9823fad8dae83ed8f359 100644
@@ -95,21 +95,6 @@ int get_cpufreq_ondemand_para(uint32_t *sampling_rate_max,
     return 0;
 }
 
-uint64_t get_cpu_idle_time(unsigned int cpu)
-{
-    uint64_t idle_ns;
-    struct vcpu *v;
-
-    if ((v = idle_vcpu[cpu]) == NULL)
-        return 0;
-
-    idle_ns = v->runstate.time[RUNSTATE_running];
-    if (v->is_running)
-        idle_ns += NOW() - v->runstate.state_entry_time;
-
-    return idle_ns;
-}
-
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
     unsigned int load = 0;
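
(Editorial sketch: get_cpu_idle_time() now lives in common scheduler code,
but the governor's use of it is unchanged: dbs_check_cpu() derives load
from successive idle-time samples. The function and per-CPU bookkeeping
names below are hypothetical:

    /* Sketch: ondemand-style load estimation from two idle samples.
     * NOW() and get_cpu_idle_time() are the Xen primitives; the prev_*
     * values would live in the governor's per-CPU state. */
    static unsigned int estimate_load_pct(unsigned int cpu,
                                          uint64_t *prev_wall,
                                          uint64_t *prev_idle)
    {
        uint64_t cur_wall = NOW();                  /* ns of wall time  */
        uint64_t cur_idle = get_cpu_idle_time(cpu); /* ns idle so far   */
        uint64_t wall = cur_wall - *prev_wall;
        uint64_t idle = cur_idle - *prev_idle;

        *prev_wall = cur_wall;
        *prev_idle = cur_idle;

        if ( wall == 0 || idle > wall )             /* no time, or skew */
            return 0;
        return (unsigned int)(((wall - idle) * 100) / wall);  /* % busy */
    }
)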
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 540f1c6b8c31fd289036506fa5845c5b567c86e2..a65032da495a17349633aec438b55c6125f18932 100644
@@ -538,6 +538,7 @@ int vcpu_locked_change_affinity(struct vcpu *v, cpumask_t *affinity);
 void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
+uint64_t get_cpu_idle_time(unsigned int cpu);
 
 #define IS_PRIV(_d) ((_d)->is_privileged)
 #define IS_PRIV_FOR(_d, _t) (IS_PRIV(_d) || ((_d)->target && (_d)->target == (_t)))