The 'ac' prefix on Xen's timer interface is meaningless and unnecessary, so drop it: ac_timer becomes plain timer throughout (init_timer, set_timer, timer_init, TIMER_SOFTIRQ, and so on).
Rename rem_ac_timer -> stop_timer.
Signed-off-by: Keir Fraser <keir@xensource.com>
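
For reference, the renamed interface is call-for-call identical to the old one; only the names change. A minimal sketch of post-patch usage, with every signature taken from the hunks below (my_timer, my_fn and the 10ms expiry are illustrative, not part of this changeset):

    #include <xen/timer.h>                 /* was <xen/ac_timer.h> */

    static struct timer my_timer;          /* was struct ac_timer */

    /* On expiry, runs in softirq context on the timer's CPU. */
    static void my_fn(void *data) { }

    static void example(void)
    {
        init_timer(&my_timer, my_fn, NULL, 0);        /* was init_ac_timer() */
        set_timer(&my_timer, NOW() + MILLISECS(10));  /* was set_ac_timer() */
        if ( active_timer(&my_timer) )                /* was active_ac_timer() */
            stop_timer(&my_timer);                    /* was rem_ac_timer() */
    }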
itc_freq = local_cpu_data->itc_freq;
vtm->cfg_max_jump=itc_freq*MAX_JUMP_STEP/1000;
vtm->cfg_min_grun=itc_freq*MIN_GUEST_RUNNING_TIME/1000;
- init_ac_timer(&vtm->vtm_timer, vtm_timer_fn, vcpu, 0);
+ init_timer(&vtm->vtm_timer, vtm_timer_fn, vcpu, 0);
vtm_reset(vcpu);
}
local_irq_save(spsr);
itv = VCPU(vcpu, itv);
if ( ITV_IRQ_MASK(itv) )
- rem_ac_timer(&vtm->vtm_timer);
+ stop_timer(&vtm->vtm_timer);
vtm_interruption_update(vcpu, vtm);
local_irq_restore(spsr);
}
/*
- * Update interrupt or hook the vtm ac_timer for fire
+ * Update the pending interrupt, or arm the vtm timer to fire later.
* At this point vtm_timer should be removed if itv is masked.
*/
/* Interrupt must be disabled at this point */
extern u64 cycle_to_ns(u64 cycle);
-#define TIMER_SLOP (50*1000) /* ns */ /* copy from ac_timer.c */
+#define TIMER_SLOP (50*1000) /* ns */ /* copy from timer.c */
void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm)
{
uint64_t cur_itc,vitm,vitv;
if ( diff_last >= 0 ) {
// interrupt already fired.
- rem_ac_timer(&vtm->vtm_timer);
+ stop_timer(&vtm->vtm_timer);
}
else if ( diff_now >= 0 ) {
// ITV is fired.
/* Both last_itc & cur_itc < itm, wait for fire condition */
else {
expires = NOW() + cycle_to_ns(0-diff_now) + TIMER_SLOP;
- set_ac_timer(&vtm->vtm_timer, expires);
+ set_timer(&vtm->vtm_timer, expires);
}
local_irq_restore(spsr);
}
/*
* Action for vtm when the domain is scheduled out.
- * Remove the ac_timer for vtm.
+ * Remove the timer for vtm.
*/
void vtm_domain_out(VCPU *vcpu)
{
if(!is_idle_domain(vcpu->domain))
- rem_ac_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
+ stop_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
}
/*
* Action for vtm when the domain is scheduled in.
- * Fire vtm IRQ or add the ac_timer for vtm.
+ * Fire vtm IRQ or add the timer for vtm.
*/
void vtm_domain_in(VCPU *vcpu)
{
void raise_actimer_softirq(void)
{
- raise_softirq(AC_TIMER_SOFTIRQ);
+ raise_softirq(TIMER_SOFTIRQ);
}
unsigned long
init_IRQ ();
printk("About to call init_xen_time()\n");
init_xen_time(); /* initialise the time */
-printk("About to call ac_timer_init()\n");
- ac_timer_init();
+printk("About to call timer_init()\n");
+ timer_init();
#ifdef CONFIG_XEN_CONSOLE_INPUT /* CONFIG_SERIAL_8250_CONSOLE=n in dom0! */
initialize_keytable();
//#endif
/* double check, in case we got hit by a (slow) PMI: */
} while (time_after_eq(ia64_get_itc(), new_itm));
- raise_softirq(AC_TIMER_SOFTIRQ);
+ raise_softirq(TIMER_SOFTIRQ);
return IRQ_HANDLED;
}
return 0;
}
-int reprogram_ac_timer(s_time_t timeout)
+int reprogram_timer(s_time_t timeout)
{
struct vcpu *v = current;
s_time_t expire;
* returns 1 on success
* returns 0 if the timeout value is too small or in the past.
*/
-int reprogram_ac_timer(s_time_t timeout)
+int reprogram_timer(s_time_t timeout)
{
s_time_t now;
s_time_t expire;
{
ack_APIC_irq();
perfc_incrc(apic_timer);
- raise_softirq(AC_TIMER_SOFTIRQ);
+ raise_softirq(TIMER_SOFTIRQ);
}
/*
static unsigned int nmi_hz = HZ;
static unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */
static unsigned int nmi_p4_cccr_val;
-static struct ac_timer nmi_timer[NR_CPUS];
+static struct timer nmi_timer[NR_CPUS];
static unsigned int nmi_timer_ticks[NR_CPUS];
/*
{
int cpu = smp_processor_id();
nmi_timer_ticks[cpu]++;
- set_ac_timer(&nmi_timer[cpu], NOW() + MILLISECS(1000));
+ set_timer(&nmi_timer[cpu], NOW() + MILLISECS(1000));
}
static void disable_lapic_nmi_watchdog(void)
lapic_nmi_owner = LAPIC_NMI_WATCHDOG;
nmi_active = 1;
- init_ac_timer(&nmi_timer[cpu], nmi_timer_fn, NULL, cpu);
+ init_timer(&nmi_timer[cpu], nmi_timer_fn, NULL, cpu);
}
static unsigned int
* during setup because the timer infrastructure is not available.
*/
for_each_online_cpu ( cpu )
- set_ac_timer(&nmi_timer[cpu], NOW());
+ set_timer(&nmi_timer[cpu], NOW());
}
spin_unlock_irqrestore(&watchdog_lock, flags);
trap_init();
- ac_timer_init();
+ timer_init();
early_time_init();
#include <xen/config.h>
#include <xen/init.h>
#include <xen/time.h>
-#include <xen/ac_timer.h>
+#include <xen/timer.h>
#include <xen/smp.h>
#include <xen/irq.h>
#include <xen/softirq.h>
s_time_t stime_local_stamp;
s_time_t stime_master_stamp;
struct time_scale tsc_scale;
- struct ac_timer calibration_timer;
+ struct timer calibration_timer;
} __cacheline_aligned;
static struct cpu_time cpu_time[NR_CPUS];
/* Rough hack to allow accurate timers to sort-of-work with no APIC. */
if ( !cpu_has_apic )
- raise_softirq(AC_TIMER_SOFTIRQ);
+ raise_softirq(TIMER_SOFTIRQ);
if ( using_pit )
pit_overflow();
/* Protected by platform_timer_lock. */
static u64 hpet_counter64, hpet_overflow_period;
static u32 hpet_stamp;
-static struct ac_timer hpet_overflow_timer;
+static struct timer hpet_overflow_timer;
static void hpet_overflow(void *unused)
{
hpet_stamp = counter;
spin_unlock_irq(&platform_timer_lock);
- set_ac_timer(&hpet_overflow_timer, NOW() + hpet_overflow_period);
+ set_timer(&hpet_overflow_timer, NOW() + hpet_overflow_period);
}
static u64 read_hpet_count(void)
(void)do_div(hpet_overflow_period, (u32)hpet_rate);
}
- init_ac_timer(&hpet_overflow_timer, hpet_overflow, NULL, 0);
+ init_timer(&hpet_overflow_timer, hpet_overflow, NULL, 0);
hpet_overflow(NULL);
platform_timer_stamp = hpet_counter64;
/* Protected by platform_timer_lock. */
static u64 cyclone_counter64;
static u32 cyclone_stamp;
-static struct ac_timer cyclone_overflow_timer;
+static struct timer cyclone_overflow_timer;
static volatile u32 *cyclone_timer; /* Cyclone MPMC0 register */
static void cyclone_overflow(void *unused)
cyclone_stamp = counter;
spin_unlock_irq(&platform_timer_lock);
- set_ac_timer(&cyclone_overflow_timer, NOW() + MILLISECS(20000));
+ set_timer(&cyclone_overflow_timer, NOW() + MILLISECS(20000));
}
static u64 read_cyclone_count(void)
read_platform_count = read_cyclone_count;
- init_ac_timer(&cyclone_overflow_timer, cyclone_overflow, NULL, 0);
+ init_timer(&cyclone_overflow_timer, cyclone_overflow, NULL, 0);
cyclone_overflow(NULL);
platform_timer_stamp = cyclone_counter64;
set_time_scale(&platform_timer_scale, CYCLONE_TIMER_FREQ);
cpu_time[cpu].stime_master_stamp = curr_master_stime;
out:
- set_ac_timer(&cpu_time[cpu].calibration_timer, NOW() + EPOCH);
+ set_timer(&cpu_time[cpu].calibration_timer, NOW() + EPOCH);
if ( cpu == 0 )
platform_time_calibration();
cpu_time[cpu].stime_master_stamp = now;
cpu_time[cpu].stime_local_stamp = now;
- init_ac_timer(&cpu_time[cpu].calibration_timer,
+ init_timer(&cpu_time[cpu].calibration_timer,
local_time_calibration, NULL, cpu);
- set_ac_timer(&cpu_time[cpu].calibration_timer, NOW() + EPOCH);
+ set_timer(&cpu_time[cpu].calibration_timer, NOW() + EPOCH);
}
/* Late init function (after all CPUs are booted). */
destroy_vmcs(&v->arch.arch_vmx);
free_monitor_pagetable(v);
vpit = &v->domain->arch.vmx_platform.vmx_pit;
- if ( active_ac_timer(&(vpit->pit_timer)) )
- rem_ac_timer(&vpit->pit_timer);
- if ( active_ac_timer(&v->arch.arch_vmx.hlt_timer) )
- rem_ac_timer(&v->arch.arch_vmx.hlt_timer);
+ if ( active_timer(&(vpit->pit_timer)) )
+ stop_timer(&vpit->pit_timer);
+ if ( active_timer(&v->arch.arch_vmx.hlt_timer) )
+ stop_timer(&v->arch.arch_vmx.hlt_timer);
if ( vmx_apic_support(v->domain) && (VLAPIC(v) != NULL) )
{
- rem_ac_timer(&VLAPIC(v)->vlapic_timer);
+ stop_timer(&VLAPIC(v)->vlapic_timer);
xfree(VLAPIC(v));
}
}
next_wakeup = next_pit;
}
if ( next_wakeup != - 1 )
- set_ac_timer(&current->arch.arch_vmx.hlt_timer, next_wakeup);
+ set_timer(&current->arch.arch_vmx.hlt_timer, next_wakeup);
do_block();
}
vpit->pending_intr_nr++;
if ( test_bit(_VCPUF_running, &v->vcpu_flags) ) {
vpit->scheduled += vpit->period;
- set_ac_timer(&vpit->pit_timer, vpit->scheduled);
+ set_timer(&vpit->pit_timer, vpit->scheduled);
}
}
void pickup_deactive_ticks(struct vmx_virpit *vpit)
{
- if ( !active_ac_timer(&(vpit->pit_timer)) ) {
+ if ( !active_timer(&(vpit->pit_timer)) ) {
/* pick up missed timer tick */
missed_ticks(vpit);
vpit->scheduled += vpit->period;
- set_ac_timer(&vpit->pit_timer, vpit->scheduled);
+ set_timer(&vpit->pit_timer, vpit->scheduled);
}
}
/* load init count*/
if (p->state == STATE_IORESP_HOOK) {
/* set up the timer, handle re-init */
- if ( active_ac_timer(&(vpit->pit_timer)) ) {
+ if ( active_timer(&(vpit->pit_timer)) ) {
VMX_DBG_LOG(DBG_LEVEL_1, "VMX_PIT: guest reset PIT with channel %lx!\n", (unsigned long) ((p->u.data >> 24) & 0x3) );
- rem_ac_timer(&(vpit->pit_timer));
+ stop_timer(&(vpit->pit_timer));
reinit = 1;
}
else {
- init_ac_timer(&vpit->pit_timer, pit_timer_fn, v, v->processor);
+ init_timer(&vpit->pit_timer, pit_timer_fn, v, v->processor);
}
/* init count for this channel */
}
vpit->scheduled = NOW() + vpit->period;
- set_ac_timer(&vpit->pit_timer, vpit->scheduled);
+ set_timer(&vpit->pit_timer, vpit->scheduled);
/*restore the state*/
p->state = STATE_IORESP_READY;
if ( !vpit->first_injected ) {
vpit->pending_intr_nr = 0;
vpit->scheduled = NOW() + vpit->period;
- set_ac_timer(&vpit->pit_timer, vpit->scheduled);
+ set_timer(&vpit->pit_timer, vpit->scheduled);
vpit->first_injected = 1;
} else {
vpit->pending_intr_nr--;
(262144 / get_apic_bus_scale()) * vlapic->timer_divide_counter;
vlapic->vlapic_timer.expires = cur + offset;
- set_ac_timer(&(vlapic->vlapic_timer), vlapic->vlapic_timer.expires );
+ set_timer(&(vlapic->vlapic_timer), vlapic->vlapic_timer.expires );
VMX_DBG_LOG(DBG_LEVEL_VLAPIC, "vlapic_begin_timer: "
"bus_scale %x now %08x%08x expire %08x%08x "
case APIC_TMICT:
if (vlapic_timer_active(vlapic))
- rem_ac_timer(&(vlapic->vlapic_timer));
+ stop_timer(&(vlapic->vlapic_timer));
vlapic->timer_initial = val;
vlapic->timer_current = val;
vlapic->timer_current = vlapic->timer_initial;
offset = vlapic->timer_current * (262144/get_apic_bus_scale()) * vlapic->timer_divide_counter;
vlapic->vlapic_timer.expires = NOW() + offset;
- set_ac_timer(&(vlapic->vlapic_timer), vlapic->vlapic_timer.expires);
+ set_timer(&(vlapic->vlapic_timer), vlapic->vlapic_timer.expires);
}else {
vlapic->timer_current = 0;
}
vmx_vioapic_add_lapic(vlapic, v);
- init_ac_timer(&vlapic->vlapic_timer,
+ init_timer(&vlapic->vlapic_timer,
vlapic_timer_fn, vlapic, v->processor);
#ifdef VLAPIC_NO_BIOS
vlapic_init(v);
vmx_set_host_env(v);
- init_ac_timer(&v->arch.arch_vmx.hlt_timer, hlt_timer_fn, v, v->processor);
+ init_timer(&v->arch.arch_vmx.hlt_timer, hlt_timer_fn, v, v->processor);
error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
error |= __vmwrite(GUEST_LDTR_BASE, 0);
+++ /dev/null
-/******************************************************************************
- * ac_timer.c
- *
- * Copyright (c) 2002-2003 Rolf Neugebauer
- * Copyright (c) 2002-2005 K A Fraser
- */
-
-#include <xen/config.h>
-#include <xen/init.h>
-#include <xen/types.h>
-#include <xen/errno.h>
-#include <xen/sched.h>
-#include <xen/lib.h>
-#include <xen/smp.h>
-#include <xen/perfc.h>
-#include <xen/time.h>
-#include <xen/softirq.h>
-#include <xen/ac_timer.h>
-#include <xen/keyhandler.h>
-#include <asm/system.h>
-#include <asm/desc.h>
-
-/*
- * We pull handlers off the timer list this far in future,
- * rather than reprogramming the time hardware.
- */
-#define TIMER_SLOP (50*1000) /* ns */
-
-struct ac_timers {
- spinlock_t lock;
- struct ac_timer **heap;
- unsigned int softirqs;
-} __cacheline_aligned;
-
-struct ac_timers ac_timers[NR_CPUS];
-
-extern int reprogram_ac_timer(s_time_t timeout);
-
-/****************************************************************************
- * HEAP OPERATIONS.
- */
-
-#define GET_HEAP_SIZE(_h) ((int)(((u16 *)(_h))[0]))
-#define SET_HEAP_SIZE(_h,_v) (((u16 *)(_h))[0] = (u16)(_v))
-
-#define GET_HEAP_LIMIT(_h) ((int)(((u16 *)(_h))[1]))
-#define SET_HEAP_LIMIT(_h,_v) (((u16 *)(_h))[1] = (u16)(_v))
-
-/* Sink down element @pos of @heap. */
-static void down_heap(struct ac_timer **heap, int pos)
-{
- int sz = GET_HEAP_SIZE(heap), nxt;
- struct ac_timer *t = heap[pos];
-
- while ( (nxt = (pos << 1)) <= sz )
- {
- if ( ((nxt+1) <= sz) && (heap[nxt+1]->expires < heap[nxt]->expires) )
- nxt++;
- if ( heap[nxt]->expires > t->expires )
- break;
- heap[pos] = heap[nxt];
- heap[pos]->heap_offset = pos;
- pos = nxt;
- }
-
- heap[pos] = t;
- t->heap_offset = pos;
-}
-
-/* Float element @pos up @heap. */
-static void up_heap(struct ac_timer **heap, int pos)
-{
- struct ac_timer *t = heap[pos];
-
- while ( (pos > 1) && (t->expires < heap[pos>>1]->expires) )
- {
- heap[pos] = heap[pos>>1];
- heap[pos]->heap_offset = pos;
- pos >>= 1;
- }
-
- heap[pos] = t;
- t->heap_offset = pos;
-}
-
-
-/* Delete @t from @heap. Return TRUE if new top of heap. */
-static int remove_entry(struct ac_timer **heap, struct ac_timer *t)
-{
- int sz = GET_HEAP_SIZE(heap);
- int pos = t->heap_offset;
-
- t->heap_offset = 0;
-
- if ( unlikely(pos == sz) )
- {
- SET_HEAP_SIZE(heap, sz-1);
- goto out;
- }
-
- heap[pos] = heap[sz];
- heap[pos]->heap_offset = pos;
-
- SET_HEAP_SIZE(heap, --sz);
-
- if ( (pos > 1) && (heap[pos]->expires < heap[pos>>1]->expires) )
- up_heap(heap, pos);
- else
- down_heap(heap, pos);
-
- out:
- return (pos == 1);
-}
-
-
-/* Add new entry @t to @heap. Return TRUE if new top of heap. */
-static int add_entry(struct ac_timer ***pheap, struct ac_timer *t)
-{
- struct ac_timer **heap = *pheap;
- int sz = GET_HEAP_SIZE(heap);
-
- /* Copy the heap if it is full. */
- if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
- {
- /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
- int old_limit = GET_HEAP_LIMIT(heap);
- int new_limit = ((old_limit + 1) << 4) - 1;
- heap = xmalloc_array(struct ac_timer *, new_limit + 1);
- BUG_ON(heap == NULL);
- memcpy(heap, *pheap, (old_limit + 1) * sizeof(*heap));
- SET_HEAP_LIMIT(heap, new_limit);
- if ( old_limit != 0 )
- xfree(*pheap);
- *pheap = heap;
- }
-
- SET_HEAP_SIZE(heap, ++sz);
- heap[sz] = t;
- t->heap_offset = sz;
- up_heap(heap, sz);
- return (t->heap_offset == 1);
-}
-
-
-/****************************************************************************
- * TIMER OPERATIONS.
- */
-
-static inline void __add_ac_timer(struct ac_timer *timer)
-{
- int cpu = timer->cpu;
- if ( add_entry(&ac_timers[cpu].heap, timer) )
- cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
-}
-
-
-static inline void __rem_ac_timer(struct ac_timer *timer)
-{
- int cpu = timer->cpu;
- if ( remove_entry(ac_timers[cpu].heap, timer) )
- cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
-}
-
-
-void set_ac_timer(struct ac_timer *timer, s_time_t expires)
-{
- int cpu = timer->cpu;
- unsigned long flags;
-
- spin_lock_irqsave(&ac_timers[cpu].lock, flags);
- ASSERT(timer != NULL);
- if ( active_ac_timer(timer) )
- __rem_ac_timer(timer);
- timer->expires = expires;
- __add_ac_timer(timer);
- spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
-}
-
-
-void rem_ac_timer(struct ac_timer *timer)
-{
- int cpu = timer->cpu;
- unsigned long flags;
-
- spin_lock_irqsave(&ac_timers[cpu].lock, flags);
- ASSERT(timer != NULL);
- if ( active_ac_timer(timer) )
- __rem_ac_timer(timer);
- spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
-}
-
-
-static void ac_timer_softirq_action(void)
-{
- int cpu = smp_processor_id();
- struct ac_timer *t, **heap;
- s_time_t now;
- void (*fn)(void *);
-
- spin_lock_irq(&ac_timers[cpu].lock);
-
- do {
- heap = ac_timers[cpu].heap;
- now = NOW();
-
- while ( (GET_HEAP_SIZE(heap) != 0) &&
- ((t = heap[1])->expires < (now + TIMER_SLOP)) )
- {
- remove_entry(heap, t);
-
- if ( (fn = t->function) != NULL )
- {
- void *data = t->data;
- spin_unlock_irq(&ac_timers[cpu].lock);
- (*fn)(data);
- spin_lock_irq(&ac_timers[cpu].lock);
- }
-
- /* Heap may have grown while the lock was released. */
- heap = ac_timers[cpu].heap;
- }
- }
- while ( !reprogram_ac_timer(GET_HEAP_SIZE(heap) ? heap[1]->expires : 0) );
-
- spin_unlock_irq(&ac_timers[cpu].lock);
-}
-
-
-static void dump_timerq(unsigned char key)
-{
- struct ac_timer *t;
- unsigned long flags;
- s_time_t now = NOW();
- int i, j;
-
- printk("Dumping ac_timer queues: NOW=0x%08X%08X\n",
- (u32)(now>>32), (u32)now);
-
- for_each_online_cpu( i )
- {
- printk("CPU[%02d] ", i);
- spin_lock_irqsave(&ac_timers[i].lock, flags);
- for ( j = 1; j <= GET_HEAP_SIZE(ac_timers[i].heap); j++ )
- {
- t = ac_timers[i].heap[j];
- printk (" %d : %p ex=0x%08X%08X %p\n",
- j, t, (u32)(t->expires>>32), (u32)t->expires, t->data);
- }
- spin_unlock_irqrestore(&ac_timers[i].lock, flags);
- printk("\n");
- }
-}
-
-
-void __init ac_timer_init(void)
-{
- static struct ac_timer *dummy_heap;
- int i;
-
- open_softirq(AC_TIMER_SOFTIRQ, ac_timer_softirq_action);
-
- /*
- * All CPUs initially share an empty dummy heap. Only those CPUs that
- * are brought online will be dynamically allocated their own heap.
- */
- SET_HEAP_SIZE(&dummy_heap, 0);
- SET_HEAP_LIMIT(&dummy_heap, 0);
-
- for ( i = 0; i < NR_CPUS; i++ )
- {
- spin_lock_init(&ac_timers[i].lock);
- ac_timers[i].heap = &dummy_heap;
- }
-
- register_keyhandler('a', dump_timerq, "dump ac_timer queues");
-}
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
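
(The deleted ac_timer.c presumably reappears, renamed, as common/timer.c — the new TIMER_SLOP comment above already says "copy from timer.c" — with the per-CPU binary heap unchanged. One subtlety worth calling out: heap[0] never holds a timer pointer; its first two u16s are reused as the heap's current size and allocated limit, and real entries live at indices 1..size. A standalone user-space sketch of that encoding, not Xen code:

    #include <stdio.h>
    #include <stdint.h>

    struct ac_timer;                   /* opaque: only pointers are stored */

    /* Same trick as the deleted GET/SET_HEAP_* macros: alias heap[0]'s
     * storage as two u16 fields instead of a pointer. */
    #define GET_HEAP_SIZE(_h)     ((int)(((uint16_t *)(_h))[0]))
    #define SET_HEAP_SIZE(_h,_v)  (((uint16_t *)(_h))[0] = (uint16_t)(_v))
    #define GET_HEAP_LIMIT(_h)    ((int)(((uint16_t *)(_h))[1]))
    #define SET_HEAP_LIMIT(_h,_v) (((uint16_t *)(_h))[1] = (uint16_t)(_v))

    int main(void)
    {
        struct ac_timer *heap[16];     /* usable slots are heap[1..limit] */
        SET_HEAP_SIZE(heap, 0);
        SET_HEAP_LIMIT(heap, 15);      /* limits are (2^n)-1, as in add_entry() */
        SET_HEAP_SIZE(heap, GET_HEAP_SIZE(heap) + 1);   /* one entry added */
        printf("size=%d limit=%d\n", GET_HEAP_SIZE(heap), GET_HEAP_LIMIT(heap));
        return 0;
    }
)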
#include <xen/delay.h>
#include <xen/event.h>
#include <xen/time.h>
-#include <xen/ac_timer.h>
+#include <xen/timer.h>
#include <xen/perfc.h>
#include <xen/sched-if.h>
#include <xen/softirq.h>
limits*/
s32 warp_value; /* virtual time warp */
s_time_t warpl; /* warp limit */
- struct ac_timer warp_timer; /* deals with warpl */
+ struct timer warp_timer; /* deals with warpl */
s_time_t warpu; /* unwarp time requirement */
- struct ac_timer unwarp_timer; /* deals with warpu */
+ struct timer unwarp_timer; /* deals with warpu */
struct bvt_vcpu_info vcpu_inf[MAX_VIRT_CPUS];
};
cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
}
- set_ac_timer(&inf->unwarp_timer, NOW() + inf->warpu);
+ set_timer(&inf->unwarp_timer, NOW() + inf->warpu);
spin_unlock_irq(&schedule_data[cpu].schedule_lock);
}
inf->warpl = MILLISECS(2000);
inf->warpu = MILLISECS(1000);
/* Initialise the warp timers. */
- init_ac_timer(&inf->warp_timer, warp_timer_fn, inf, v->processor);
- init_ac_timer(&inf->unwarp_timer, unwarp_timer_fn, inf, v->processor);
+ init_timer(&inf->warp_timer, warp_timer_fn, inf, v->processor);
+ init_timer(&inf->unwarp_timer, unwarp_timer_fn, inf, v->processor);
}
einf->vcpu = v;
if ( is_idle_vcpu(curr) || (einf->evt <= curr_evt) )
cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
else if ( schedule_data[cpu].s_timer.expires > r_time )
- set_ac_timer(&schedule_data[cpu].s_timer, r_time);
+ set_timer(&schedule_data[cpu].s_timer, r_time);
}
inf->warpu = MILLISECS(warpu);
/* If the unwarp timer set up it needs to be removed */
- rem_ac_timer(&inf->unwarp_timer);
+ stop_timer(&inf->unwarp_timer);
/* If we stop warping the warp timer needs to be removed */
if ( !warpback )
- rem_ac_timer(&inf->warp_timer);
+ stop_timer(&inf->warp_timer);
}
else if ( cmd->direction == SCHED_INFO_GET )
{
prev_einf->evt = calc_evt(prev, prev_einf->avt);
if(prev_inf->warpback && prev_inf->warpl > 0)
- rem_ac_timer(&prev_inf->warp_timer);
+ stop_timer(&prev_inf->warp_timer);
__del_from_runqueue(prev);
}
if ( next_einf->inf->warp && next_einf->inf->warpl > 0 )
- set_ac_timer(&next_einf->inf->warp_timer, now + next_einf->inf->warpl);
+ set_timer(&next_einf->inf->warp_timer, now + next_einf->inf->warpl);
/* Extract the domain pointers from the dom infos */
next = next_einf->vcpu;
#include <xen/sched.h>
#include <xen/sched-if.h>
#include <public/sched_ctl.h>
-#include <xen/ac_timer.h>
+#include <xen/timer.h>
#include <xen/softirq.h>
#include <xen/time.h>
#include <xen/delay.h>
#include <xen/event.h>
#include <xen/time.h>
-#include <xen/ac_timer.h>
+#include <xen/timer.h>
#include <xen/perfc.h>
#include <xen/sched-if.h>
#include <xen/softirq.h>
: (typeof(ops.fn(__VA_ARGS__)))0 )
/* Per-CPU periodic timer sends an event to the currently-executing domain. */
-static struct ac_timer t_timer[NR_CPUS];
+static struct timer t_timer[NR_CPUS];
void free_domain(struct domain *d)
{
void sched_add_domain(struct vcpu *v)
{
/* Initialise the per-domain timer. */
- init_ac_timer(&v->timer, dom_timer_fn, v, v->processor);
+ init_timer(&v->timer, dom_timer_fn, v, v->processor);
if ( is_idle_vcpu(v) )
{
void sched_rem_domain(struct vcpu *v)
{
- rem_ac_timer(&v->timer);
+ stop_timer(&v->timer);
SCHED_OP(rem_task, v);
TRACE_2D(TRC_SCHED_DOM_REM, v->domain->domain_id, v->vcpu_id);
}
struct vcpu *v = current;
if ( timeout == 0 )
- rem_ac_timer(&v->timer);
+ stop_timer(&v->timer);
else
- set_ac_timer(&v->timer, timeout);
+ set_timer(&v->timer, timeout);
return 0;
}
spin_lock_irq(&schedule_data[cpu].schedule_lock);
- rem_ac_timer(&schedule_data[cpu].s_timer);
+ stop_timer(&schedule_data[cpu].s_timer);
prev->cpu_time += now - prev->lastschd;
next->lastschd = now;
- set_ac_timer(&schedule_data[cpu].s_timer, now + r_time);
+ set_timer(&schedule_data[cpu].s_timer, now + r_time);
if ( unlikely(prev == next) )
{
page_scrub_schedule_work();
- set_ac_timer(&t_timer[cpu], NOW() + MILLISECS(10));
+ set_timer(&t_timer[cpu], NOW() + MILLISECS(10));
}
/* Domain timer function, sends a virtual timer interrupt to domain */
for ( i = 0; i < NR_CPUS; i++ )
{
spin_lock_init(&schedule_data[i].schedule_lock);
- init_ac_timer(&schedule_data[i].s_timer, s_timer_fn, NULL, i);
- init_ac_timer(&t_timer[i], t_timer_fn, NULL, i);
+ init_timer(&schedule_data[i].s_timer, s_timer_fn, NULL, i);
+ init_timer(&t_timer[i], t_timer_fn, NULL, i);
}
for ( i = 0; schedulers[i] != NULL; i++ )
#include <xen/config.h>
#include <xen/mm.h>
#include <xen/spinlock.h>
-#include <xen/ac_timer.h>
+#include <xen/timer.h>
#include <xen/cache.h>
#include <xen/prefetch.h>
/* UART with IRQ line: interrupt-driven I/O. */
struct irqaction irqaction;
/* UART with no IRQ line: periodically-polled I/O. */
- struct ac_timer timer;
+ struct timer timer;
unsigned int timeout_ms;
} ns16550_com[2] = { { 0 } };
if ( ns_read_reg(uart, LSR) & LSR_THRE )
serial_tx_interrupt(port, regs);
- set_ac_timer(&uart->timer, NOW() + MILLISECS(uart->timeout_ms));
+ set_timer(&uart->timer, NOW() + MILLISECS(uart->timeout_ms));
}
static int ns16550_tx_empty(struct serial_port *port)
bits = uart->data_bits + uart->stop_bits + !!uart->parity;
uart->timeout_ms = max_t(
unsigned int, 1, (bits * port->tx_fifo_size * 1000) / uart->baud);
- init_ac_timer(&uart->timer, ns16550_poll, port, 0);
- set_ac_timer(&uart->timer, NOW() + MILLISECS(uart->timeout_ms));
+ init_timer(&uart->timer, ns16550_poll, port, 0);
+ set_timer(&uart->timer, NOW() + MILLISECS(uart->timeout_ms));
}
else
{
#define platform_outl __ia64_outl
// FIXME: This just overrides a use in a typedef (not allowed in ia64,
-// or maybe just in older gcc's?) used in ac_timer.c but should be OK
+// or maybe just in older gcc's?) used in timer.c but should be OK
// (and indeed is probably required!) elsewhere
#undef __cacheline_aligned
#undef ____cacheline_aligned
#ifndef _VTM_H_
#define _VTM_H_
-#include <xen/ac_timer.h>
+#include <xen/timer.h>
#include <xen/types.h>
#define MAX_JUMP_STEP (5000) /* 500ms, max jump step */
uint64_t cfg_max_jump; // max jump within one time suspension
uint64_t cfg_min_grun; // min guest running time since last jump
// uint64_t latest_read_itc; // latest guest read ITC
- struct ac_timer vtm_timer;
+ struct timer vtm_timer;
// int triggered;
uint32_t timer_current;
uint32_t timer_divconf;
uint32_t timer_divide_counter;
- struct ac_timer vlapic_timer;
+ struct timer vlapic_timer;
int intr_pending_count[MAX_VECTOR];
s_time_t timer_current_update;
uint32_t icr_high;
static inline int vlapic_timer_active(struct vlapic *vlapic)
{
- return active_ac_timer(&(vlapic->vlapic_timer));
+ return active_timer(&(vlapic->vlapic_timer));
}
int vlapic_find_highest_irr(struct vlapic *vlapic);
void *io_bitmap_a, *io_bitmap_b;
struct vlapic *vlapic;
u64 tsc_offset;
- struct ac_timer hlt_timer; /* hlt ins emulation wakeup timer */
+ struct timer hlt_timer; /* hlt ins emulation wakeup timer */
};
#define vmx_schedule_tail(next) \
#include <xen/lib.h>
#include <xen/time.h>
#include <xen/errno.h>
-#include <xen/ac_timer.h>
+#include <xen/timer.h>
#include <asm/vmx_vmcs.h>
#include <asm/vmx_vpic.h>
u64 inject_point; /* the time the virt intr was injected */
u64 shift; /* save the value of offset - drift */
s_time_t scheduled; /* scheduled timer interrupt */
- struct ac_timer pit_timer; /* periodic timer for mode 2*/
+ struct timer pit_timer; /* periodic timer for mode 2*/
unsigned int channel; /* the pit channel, counter 0~2 */
unsigned int pending_intr_nr; /* the counter for pending timer interrupts */
u32 period; /* pit interrupt period in ns */
+++ /dev/null
-/******************************************************************************
- * ac_timer.h
- *
- * Copyright (c) 2002-2003 Rolf Neugebauer
- * Copyright (c) 2002-2005 K A Fraser
- */
-
-#ifndef _AC_TIMER_H_
-#define _AC_TIMER_H_
-
-#include <xen/spinlock.h>
-#include <xen/time.h>
-#include <xen/string.h>
-
-struct ac_timer {
- /* System time expiry value (nanoseconds since boot). */
- s_time_t expires;
- /* CPU on which this timer will be installed and executed. */
- unsigned int cpu;
- /* On expiry, '(*function)(data)' will be executed in softirq context. */
- void (*function)(void *);
- void *data;
- /* Timer-heap offset. */
- unsigned int heap_offset;
-};
-
-/*
- * All functions below can be called for any CPU from any CPU in any context.
- */
-
-/* Returns TRUE if the given timer is on a timer list. */
-static __inline__ int active_ac_timer(struct ac_timer *timer)
-{
- return (timer->heap_offset != 0);
-}
-
-/*
- * It initialises the static fields of the ac_timer structure.
- * It can be called multiple times to reinitialise a single (inactive) timer.
- */
-static __inline__ void init_ac_timer(
- struct ac_timer *timer,
- void (*function)(void *),
- void *data,
- unsigned int cpu)
-{
- memset(timer, 0, sizeof(*timer));
- timer->function = function;
- timer->data = data;
- timer->cpu = cpu;
-}
-
-/*
- * Set the expiry time and activate a timer (which must previously have been
- * initialised by init_ac_timer).
- */
-extern void set_ac_timer(struct ac_timer *timer, s_time_t expires);
-
-/*
- * Deactivate a timer (which must previously have been initialised by
- * init_ac_timer). This function has no effect if the timer is not currently
- * active.
- */
-extern void rem_ac_timer(struct ac_timer *timer);
-
-/*
- * Initialisation. Must be called before any other ac_timer function.
- */
-extern void ac_timer_init(void);
-
-#endif /* _AC_TIMER_H_ */
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
PERFCOUNTER_CPU(irq_time, "cycles spent in irq handler")
PERFCOUNTER_CPU(apic_timer, "apic timer interrupts")
-PERFCOUNTER_CPU(ac_timer_max, "ac_timer max error (ns)")
+PERFCOUNTER_CPU(timer_max, "timer max error (ns)")
PERFCOUNTER_CPU(sched_irq, "sched: timer")
PERFCOUNTER_CPU(sched_run, "sched: runs through scheduler")
PERFCOUNTER_CPU(sched_ctx, "sched: context switches")
struct vcpu *curr; /* current task */
struct vcpu *idle; /* idle task for this cpu */
void *sched_priv;
- struct ac_timer s_timer; /* scheduling timer */
+ struct timer s_timer; /* scheduling timer */
unsigned long tick; /* current periodic 'tick' */
#ifdef BUCKETS
u32 hist[BUCKETS]; /* for scheduler latency histogram */
#include <public/xen.h>
#include <public/dom0_ops.h>
#include <xen/time.h>
-#include <xen/ac_timer.h>
+#include <xen/timer.h>
#include <xen/grant_table.h>
#include <xen/rangeset.h>
#include <asm/domain.h>
struct vcpu *next_in_list;
- struct ac_timer timer; /* one-shot timer for timeout values */
+ struct timer timer; /* one-shot timer for timeout values */
unsigned long sleep_tick; /* tick at which this vcpu started sleep */
s_time_t lastschd; /* time this domain was last scheduled */
#define __XEN_SOFTIRQ_H__
/* Common softirqs come first in the following list. */
-#define AC_TIMER_SOFTIRQ 0
+#define TIMER_SOFTIRQ 0
#define SCHEDULE_SOFTIRQ 1
#define NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ 2
#define KEYPRESS_SOFTIRQ 3