{
unsigned int cpu = smp_processor_id();
- sched_tick_suspend();
- /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
+ rcu_idle_enter(cpu);
+ /* rcu_idle_enter() can raise TIMER_SOFTIRQ. Process it now. */
process_pending_softirqs();
local_irq_disable();
}
local_irq_enable();
- sched_tick_resume();
+ rcu_idle_exit(cpu);
}
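[Illustration only, not part of the patch: the hunks above and below convert idle-loop callers from sched_tick_suspend()/sched_tick_resume() to direct rcu_idle_enter()/rcu_idle_exit() calls. A minimal sketch of the resulting caller shape is below; do_arch_halt() is a placeholder name for the arch-specific halt, not a real Xen function.]

    /*
     * Sketch of an idle-loop caller after the conversion.
     * do_arch_halt() is a placeholder for the actual halt/wfi path.
     */
    unsigned int cpu = smp_processor_id();

    rcu_idle_enter(cpu);
    /* rcu_idle_enter() can raise TIMER_SOFTIRQ. Process it now. */
    process_pending_softirqs();

    local_irq_disable();
    if ( cpu_is_haltable(cpu) )
        do_arch_halt();
    local_irq_enable();

    rcu_idle_exit(cpu);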
void idle_loop(void)
static void acpi_processor_idle(void)
{
- struct acpi_processor_power *power = processor_powers[smp_processor_id()];
+ unsigned int cpu = smp_processor_id();
+ struct acpi_processor_power *power = processor_powers[cpu];
struct acpi_processor_cx *cx = NULL;
int next_state;
uint64_t t1, t2 = 0;
cpufreq_dbs_timer_suspend();
- sched_tick_suspend();
- /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
+ rcu_idle_enter(cpu);
+ /* rcu_idle_enter() can raise TIMER_SOFTIRQ. Process it now. */
process_pending_softirqs();
/*
*/
local_irq_disable();
- if ( !cpu_is_haltable(smp_processor_id()) )
+ if ( !cpu_is_haltable(cpu) )
{
local_irq_enable();
- sched_tick_resume();
+ rcu_idle_exit(cpu);
cpufreq_dbs_timer_resume();
return;
}
/* Now in C0 */
power->last_state = &power->states[0];
local_irq_enable();
- sched_tick_resume();
+ rcu_idle_exit(cpu);
cpufreq_dbs_timer_resume();
return;
}
/* Now in C0 */
power->last_state = &power->states[0];
- sched_tick_resume();
+ rcu_idle_exit(cpu);
cpufreq_dbs_timer_resume();
if ( cpuidle_current_governor->reflect )
cpufreq_dbs_timer_suspend();
- sched_tick_suspend();
- /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
+ rcu_idle_enter(cpu);
+ /* rcu_idle_enter() can raise TIMER_SOFTIRQ. Process it now. */
process_pending_softirqs();
/* Interrupts must be disabled for C2 and higher transitions. */
if (!cpu_is_haltable(cpu)) {
local_irq_enable();
- sched_tick_resume();
+ rcu_idle_exit(cpu);
cpufreq_dbs_timer_resume();
return;
}
if (!(lapic_timer_reliable_states & (1 << cstate)))
lapic_timer_on();
- sched_tick_resume();
+ rcu_idle_exit(cpu);
cpufreq_dbs_timer_resume();
if ( cpuidle_current_governor->reflect )
* periodically poke rcu_pending(), so that it will invoke the callback
* not too late after the end of the grace period.
*/
-void rcu_idle_timer_start()
+static void rcu_idle_timer_start(void)
{
struct rcu_data *rdp = &this_cpu(rcu_data);
rdp->idle_timer_active = true;
}
-void rcu_idle_timer_stop()
+static void rcu_idle_timer_stop(void)
{
struct rcu_data *rdp = &this_cpu(rcu_data);
* See the comment before cpumask_andnot() in rcu_start_batch().
*/
smp_mb();
+
+ rcu_idle_timer_start();
}
void rcu_idle_exit(unsigned int cpu)
{
+ rcu_idle_timer_stop();
ASSERT(cpumask_test_cpu(cpu, &rcu_ctrlblk.idle_cpumask));
cpumask_clear_cpu(cpu, &rcu_ctrlblk.idle_cpumask);
}
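[For clarity, a hedged sketch of the resulting pairing in rcupdate.c after this change: the timer helpers become static and are invoked from within rcu_idle_enter()/rcu_idle_exit() themselves, so callers no longer need the sched_tick_* wrappers. Function bodies are abridged to the parts touched here.]

    /* Sketch only; bodies abridged. */
    static void rcu_idle_timer_start(void);
    static void rcu_idle_timer_stop(void);

    void rcu_idle_enter(unsigned int cpu)
    {
        /* ... existing idle_cpumask bookkeeping and smp_mb() ... */
        rcu_idle_timer_start();
    }

    void rcu_idle_exit(unsigned int cpu)
    {
        rcu_idle_timer_stop();
        /* ... existing idle_cpumask bookkeeping ... */
    }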
rcu_read_unlock(&sched_res_rculock);
}
-void sched_tick_suspend(void)
-{
- rcu_idle_enter(smp_processor_id());
- rcu_idle_timer_start();
-}
-
-void sched_tick_resume(void)
-{
- rcu_idle_timer_stop();
- rcu_idle_exit(smp_processor_id());
-}
-
void wait(void)
{
schedule();
void rcu_idle_enter(unsigned int cpu);
void rcu_idle_exit(unsigned int cpu);
-void rcu_idle_timer_start(void);
-void rcu_idle_timer_stop(void);
-
#endif /* __XEN_RCUPDATE_H */
long sched_adjust(struct domain *, struct xen_domctl_scheduler_op *);
long sched_adjust_global(struct xen_sysctl_scheduler_op *);
int sched_id(void);
-void sched_tick_suspend(void);
-void sched_tick_resume(void);
void vcpu_wake(struct vcpu *v);
long vcpu_yield(void);
void vcpu_sleep_nosync(struct vcpu *v);