int pmstat_get_cx_stat(uint32_t cpuid, struct pm_cx_stat *stat)
{
const struct acpi_processor_power *power = processor_powers[cpuid];
- struct vcpu *v = idle_vcpu[cpuid];
uint64_t usage;
int i;
stat->last = power->last_state ? power->last_state->idx : 0;
stat->nr = power->count;
- stat->idle_time = v->runstate.time[RUNSTATE_running];
- if ( v->is_running )
- stat->idle_time += NOW() - v->runstate.state_entry_time;
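+ /* get_cpu_idle_time() includes the idle vcpu's in-progress running interval, so no explicit is_running adjustment is needed. */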
+ stat->idle_time = get_cpu_idle_time(cpuid);
for ( i = 0; i < power->count; i++ )
{
for_each_cpu_mask ( cpu, cpumap )
{
- if ( (v = idle_vcpu[cpu]) != NULL )
- {
- idletime = v->runstate.time[RUNSTATE_running];
- if ( v->is_running )
- idletime += now - v->runstate.state_entry_time;
- }
- else
- {
- idletime = 0;
+ if ( idle_vcpu[cpu] == NULL )
cpu_clear(cpu, cpumap);
- }
+ idletime = get_cpu_idle_time(cpu);
ret = -EFAULT;
if ( copy_to_guest_offset(idletimes, cpu, &idletime, 1) )
static inline void vcpu_runstate_change(
struct vcpu *v, int new_state, s_time_t new_entry_time)
{
+ s_time_t delta;
+
ASSERT(v->runstate.state != new_state);
ASSERT(spin_is_locked(&per_cpu(schedule_data,v->processor).schedule_lock));
trace_runstate_change(v, new_state);
- v->runstate.time[v->runstate.state] +=
- new_entry_time - v->runstate.state_entry_time;
- v->runstate.state_entry_time = new_entry_time;
+ delta = new_entry_time - v->runstate.state_entry_time;
+ /* Never charge a negative delta (e.g. apparent cross-CPU time skew): keep accumulated runstate times monotonic. */
+ if ( delta > 0 )
+ {
+ v->runstate.time[v->runstate.state] += delta;
+ v->runstate.state_entry_time = new_entry_time;
+ }
+
v->runstate.state = new_state;
}
void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
{
- if ( likely(v == current) )
- {
- /* Fast lock-free path. */
- memcpy(runstate, &v->runstate, sizeof(*runstate));
- ASSERT(runstate->state == RUNSTATE_running);
- runstate->time[RUNSTATE_running] += NOW() - runstate->state_entry_time;
- }
- else
- {
+ s_time_t delta;
+
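+ /* A vcpu reading its own runstate needs no lock: it cannot concurrently be the subject of a runstate change. */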
+ if ( unlikely(v != current) )
vcpu_schedule_lock_irq(v);
- memcpy(runstate, &v->runstate, sizeof(*runstate));
- runstate->time[runstate->state] += NOW() - runstate->state_entry_time;
+
+ memcpy(runstate, &v->runstate, sizeof(*runstate));
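+ /* Fold in the time spent so far in the current state; as in vcpu_runstate_change(), ignore an apparently negative delta. */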
+ delta = NOW() - runstate->state_entry_time;
+ if ( delta > 0 )
+ runstate->time[runstate->state] += delta;
+
+ if ( unlikely(v != current) )
vcpu_schedule_unlock_irq(v);
- }
+}
+
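+/* Total time, in nanoseconds, that a physical CPU has spent idle: the time its idle vcpu has been accounted as running. */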
+uint64_t get_cpu_idle_time(unsigned int cpu)
+{
+ struct vcpu_runstate_info state = { .state = RUNSTATE_running };
+ struct vcpu *v;
+
+ if ( (v = idle_vcpu[cpu]) == NULL )
+ return 0;
+
+ vcpu_runstate_get(v, &state);
+ return state.time[RUNSTATE_running];
}
int sched_init_vcpu(struct vcpu *v, unsigned int processor)
{
uint32_t i, nr_cpus;
struct xen_sysctl_cpuinfo cpuinfo;
- struct vcpu *v;
nr_cpus = min_t(uint32_t, op->u.getcpuinfo.max_cpus, NR_CPUS);
for ( i = 0; i < nr_cpus; i++ )
{
- /* Assume no holes in idle-vcpu map. */
- if ( (v = idle_vcpu[i]) == NULL )
- break;
-
- cpuinfo.idletime = v->runstate.time[RUNSTATE_running];
- if ( v->is_running )
- cpuinfo.idletime += NOW() - v->runstate.state_entry_time;
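+ /* get_cpu_idle_time() returns 0 when a CPU has no idle vcpu, so holes in the idle-vcpu map no longer need special-casing. */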
+ cpuinfo.idletime = get_cpu_idle_time(i);
ret = -EFAULT;
if ( copy_to_guest_offset(op->u.getcpuinfo.info, i, &cpuinfo, 1) )