acceptable deadline range, rather than just deadline start.
Signed-off-by: Wei Gang <gang.wei@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
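As a rough illustration of the idea (a self-contained sketch, not code from this patch: the array-based per-CPU storage, the NR_CPUS value and the pick_next_event() helper are all made up for the example), a shared broadcast timer driven by per-CPU [start, end] deadline ranges wakes any CPU whose range start has already passed, and otherwise programs itself for the earliest range end, the latest instant that still satisfies every remaining CPU:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4                       /* illustrative only */
typedef int64_t s_time_t;               /* nanoseconds, as in Xen */
#define STIME_MAX ((s_time_t)((uint64_t)~0ULL >> 1))

/* Hypothetical per-CPU deadline ranges: earliest and latest acceptable wakeup. */
static s_time_t deadline_start[NR_CPUS];
static s_time_t deadline_end[NR_CPUS];

/*
 * Decide which CPUs must be woken right now and when the shared timer should
 * fire next.  Mirrors the shape of the broadcast loop in the patch: the
 * "already expired" test uses the range start, while the next hardware event
 * is taken from the earliest range end among the remaining CPUs.
 */
static s_time_t pick_next_event(s_time_t now, unsigned int *wake_mask)
{
    s_time_t next_event = STIME_MAX;
    int cpu;

    *wake_mask = 0;
    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
    {
        if ( deadline_start[cpu] <= now )
            *wake_mask |= 1u << cpu;            /* due already: wake immediately */
        else if ( deadline_end[cpu] < next_event )
            next_event = deadline_end[cpu];     /* latest point this CPU tolerates */
    }
    return next_event;
}

int main(void)
{
    unsigned int mask;
    s_time_t next;

    /* Two CPUs already due, two with overlapping future windows. */
    deadline_start[0] = 100;  deadline_end[0] = 150;
    deadline_start[1] = 200;  deadline_end[1] = 260;
    deadline_start[2] = 900;  deadline_end[2] = 1200;
    deadline_start[3] = 1000; deadline_end[3] = 1100;

    next = pick_next_event(300, &mask);
    printf("wake mask %#x, next event at %lld\n", mask, (long long)next);
    return 0;
}

Waking at the earliest remaining range end rather than the earliest start is what lets several CPUs whose windows overlap be serviced by a single broadcast interrupt, which is the point of passing a range instead of a single deadline.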
static unsigned int get_sleep_length_us(void)
{
- s_time_t us = (per_cpu(timer_deadline, smp_processor_id()) - NOW()) / 1000;
+ s_time_t us = (this_cpu(timer_deadline_start) - NOW()) / 1000;
/*
 * If us is negative or does not fit in a u32, return a large u32 instead;
 * (unsigned int)-2000 is chosen so that adding the exit latency to it
 * cannot wrap around.
 */
/* find all expired events */
for_each_cpu_mask(cpu, ch->cpumask)
{
- if ( per_cpu(timer_deadline, cpu) <= now )
+ if ( per_cpu(timer_deadline_start, cpu) <= now )
cpu_set(cpu, mask);
- else if ( per_cpu(timer_deadline, cpu) < next_event )
- next_event = per_cpu(timer_deadline, cpu);
+ else if ( per_cpu(timer_deadline_end, cpu) < next_event )
+ next_event = per_cpu(timer_deadline_end, cpu);
}
/* Wake up the CPUs that have an expired event. */
int cpu = smp_processor_id();
struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
- if ( this_cpu(timer_deadline) == 0 )
+ if ( this_cpu(timer_deadline_start) == 0 )
return;
if ( !ch )
cpu_set(cpu, ch->cpumask);
/* Reprogram if the current CPU's expiry time is nearer. */
- if ( this_cpu(timer_deadline) < ch->next_event )
- reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline), NOW(), 1);
+ if ( this_cpu(timer_deadline_end) < ch->next_event )
+ reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline_end), NOW(), 1);
spin_unlock(&ch->lock);
}
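An observation on the two paths, not part of the original change description: when entering broadcast mode the wakeup is delegated to the shared HPET channel, which is only reprogrammed if this CPU's range end is earlier than the event already scheduled, so a wakeup may be deferred to the end of its acceptable window and shared with other CPUs. The exit path below instead re-arms the CPU's own timer with the range start, the earliest point of the window, so no accuracy is given up once the CPU is handling its own deadlines again.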
int cpu = smp_processor_id();
struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
- if ( this_cpu(timer_deadline) == 0 )
+ if ( this_cpu(timer_deadline_start) == 0 )
return;
BUG_ON( !ch );
{
/* Reprogram the deadline; trigger timer work now if it has passed. */
enable_APIC_timer();
- if ( !reprogram_timer(per_cpu(timer_deadline, cpu)) )
+ if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
raise_softirq(TIMER_SOFTIRQ);
if ( cpus_empty(ch->cpumask) && ch->next_event != STIME_MAX )
int cpu = smp_processor_id();
if ( cpu_test_and_clear(cpu, pit_broadcast_mask) )
- reprogram_timer(per_cpu(timer_deadline, cpu));
+ reprogram_timer(per_cpu(timer_deadline_start, cpu));
}
int pit_broadcast_is_available(void)
static DEFINE_PER_CPU(struct timers, timers);
-DEFINE_PER_CPU(s_time_t, timer_deadline);
+DEFINE_PER_CPU(s_time_t, timer_deadline_start);
+DEFINE_PER_CPU(s_time_t, timer_deadline_end);
/****************************************************************************
* HEAP OPERATIONS.
if ( unlikely(ts->overflow) )
{
/* Find earliest deadline at head of list or top of heap. */
- this_cpu(timer_deadline) = ts->list->expires;
+ this_cpu(timer_deadline_start) = ts->list->expires;
if ( (GET_HEAP_SIZE(heap) != 0) &&
- ((t = heap[1])->expires < this_cpu(timer_deadline)) )
- this_cpu(timer_deadline) = t->expires;
+ ((t = heap[1])->expires < this_cpu(timer_deadline_start)) )
+ this_cpu(timer_deadline_start) = t->expires;
+ this_cpu(timer_deadline_end) = this_cpu(timer_deadline_start);
}
else
{
end = t->expires_end;
}
- this_cpu(timer_deadline) = start;
+ this_cpu(timer_deadline_start) = start;
+ this_cpu(timer_deadline_end) = end;
}
- if ( !reprogram_timer(this_cpu(timer_deadline)) )
+ if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
raise_softirq(TIMER_SOFTIRQ);
spin_unlock_irq(&ts->lock);
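A note on the timer.c hunks above (an inference from the excerpt, not wording from the changeset): the overflow path only has the earliest expires value to hand, so it publishes a degenerate range with timer_deadline_end equal to timer_deadline_start, while the normal path publishes the [start, end] window derived from the timers' expires and expires_end fields. In both cases reprogram_timer() is still handed the start of the range; the end exists for consumers such as the HPET broadcast code above, which may defer the wakeup up to that point.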
 * Next timer deadline for each CPU, given as the start and end of the
 * acceptable expiry range.
* Modified only by the local CPU and never in interrupt context.
*/
-DECLARE_PER_CPU(s_time_t, timer_deadline);
+DECLARE_PER_CPU(s_time_t, timer_deadline_start);
+DECLARE_PER_CPU(s_time_t, timer_deadline_end);
/* Arch-defined function to reprogram timer hardware for new deadline. */
extern int reprogram_timer(s_time_t timeout);