destroy_vmcs(&v->arch.arch_vmx);
free_monitor_pagetable(v);
vpit = &v->domain->arch.vmx_platform.vmx_pit;
- if ( active_timer(&(vpit->pit_timer)) )
- stop_timer(&vpit->pit_timer);
- if ( active_timer(&v->arch.arch_vmx.hlt_timer) )
- stop_timer(&v->arch.arch_vmx.hlt_timer);
+ kill_timer(&vpit->pit_timer);
+ kill_timer(&v->arch.arch_vmx.hlt_timer);
if ( vmx_apic_support(v->domain) && (VLAPIC(v) != NULL) )
{
- stop_timer(&VLAPIC(v)->vlapic_timer);
+ kill_timer(&VLAPIC(v)->vlapic_timer);
xfree(VLAPIC(v));
}
}
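
The conversion from stop_timer() to kill_timer() matters in this teardown path: stop_timer() only dequeues a pending timer, so a handler already executing on another CPU, or one that re-arms itself, can still touch the PIT or VLAPIC state after it is freed. A minimal sketch of the hazard (containing_struct is illustrative; the timers here are embedded in structures freed later during domain destruction):

    /* Unsafe: the handler may still be mid-flight on another CPU. */
    stop_timer(&vpit->pit_timer);
    xfree(containing_struct);        /* handler can still dereference vpit */

    /* Safe: waits for any running handler; timer can never be re-armed. */
    kill_timer(&vpit->pit_timer);
    xfree(containing_struct);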
s32 warp_value; /* virtual time warp */
s_time_t warpl; /* warp limit */
struct timer warp_timer; /* deals with warpl */
s_time_t warpu; /* unwarp time requirement */
struct timer unwarp_timer; /* deals with warpu */
struct bvt_vcpu_info vcpu_inf[MAX_VIRT_CPUS];
};
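
For context, the warp fields implement BVT's low-latency mechanism: a warped domain's effective virtual time (evt) lags its actual virtual time (avt) by warp_value, so the domain is dispatched sooner, while warp_timer enforces the warpl limit and unwarp_timer the warpu cooling-off period. Roughly, in the scheduler's evt calculation (a simplified sketch of bvt.c's calc_evt(), not a verbatim copy):

    /* Simplified: effective virtual time of a warped vcpu. */
    if ( inf->warp && inf->warpback )
        einf->evt = einf->avt - inf->warp_value;  /* runs earlier while warped */
    else
        einf->evt = einf->avt;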
static int bvt_alloc_task(struct vcpu *v)
{
struct domain *d = v->domain;
+ struct bvt_dom_info *inf;
if ( (d->sched_priv == NULL) )
{
    if ( (d->sched_priv = xmalloc(struct bvt_dom_info)) == NULL )
        return -1;
    memset(d->sched_priv, 0, sizeof(struct bvt_dom_info));
}
- v->sched_priv = &BVT_INFO(d)->vcpu_inf[v->vcpu_id];
+ inf = BVT_INFO(d);
+
+ v->sched_priv = &inf->vcpu_inf[v->vcpu_id];
- BVT_INFO(d)->vcpu_inf[v->vcpu_id].inf = BVT_INFO(d);
- BVT_INFO(d)->vcpu_inf[v->vcpu_id].vcpu = v;
+ inf->vcpu_inf[v->vcpu_id].inf = inf;
+ inf->vcpu_inf[v->vcpu_id].vcpu = v;
+
+ if ( v->vcpu_id == 0 )
+ {
+ inf->mcu_advance = MCU_ADVANCE;
+ inf->domain = v->domain;
+ inf->warpback = 0;
+ /* Default warp parameters. */
+ inf->warp = 0;
+ inf->warp_value = 0;
+ inf->warpl = MILLISECS(2000);
+ inf->warpu = MILLISECS(1000);
+ /* Initialise the warp timers. */
+ init_timer(&inf->warp_timer, warp_timer_fn, inf, v->processor);
+ init_timer(&inf->unwarp_timer, unwarp_timer_fn, inf, v->processor);
+ }
return 0;
}
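
Moving the init_timer() calls into bvt_alloc_task() pairs them with the kill_timer() calls added to bvt_free_task() below, giving each per-domain warp timer a single clear lifecycle (the set_timer()/stop_timer() lines are illustrative of the active phase):

    init_timer(&inf->warp_timer, warp_timer_fn, inf, v->processor);  /* allocate */
    set_timer(&inf->warp_timer, NOW() + inf->warpl);                 /* arm */
    stop_timer(&inf->warp_timer);                                    /* disarm; may be re-set */
    kill_timer(&inf->warp_timer);                                    /* final; can never fire */
    xfree(inf);                                                      /* now safe to free */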
static void bvt_add_task(struct vcpu *v)
{
struct bvt_dom_info *inf = BVT_INFO(v->domain);
struct bvt_vcpu_info *einf = EBVT_INFO(v);
+
ASSERT(inf != NULL);
ASSERT(v != NULL);
CPU_SVT(v->processor) = 0;
}
- if ( v->vcpu_id == 0 )
- {
- inf->mcu_advance = MCU_ADVANCE;
- inf->domain = v->domain;
- inf->warpback = 0;
- /* Set some default values here. */
- inf->warp = 0;
- inf->warp_value = 0;
- inf->warpl = MILLISECS(2000);
- inf->warpu = MILLISECS(1000);
- /* Initialise the warp timers. */
- init_timer(&inf->warp_timer, warp_timer_fn, inf, v->processor);
- init_timer(&inf->unwarp_timer, unwarp_timer_fn, inf, v->processor);
- }
-
- einf->vcpu = v;
-
if ( is_idle_vcpu(v) )
{
einf->avt = einf->evt = ~0U;
/*
 * Free BVT private structures associated with a domain.
 */
static void bvt_free_task(struct domain *d)
{
- ASSERT(d->sched_priv != NULL);
- xfree(d->sched_priv);
+ struct bvt_dom_info *inf = BVT_INFO(d);
+
+ ASSERT(inf != NULL);
+
+ kill_timer(&inf->warp_timer);
+ kill_timer(&inf->unwarp_timer);
+
+ xfree(inf);
}
/* Control the scheduler. */
void sched_rem_domain(struct vcpu *v)
{
- stop_timer(&v->timer);
+ kill_timer(&v->timer);
SCHED_OP(rem_task, v);
TRACE_2D(TRC_SCHED_DOM_REM, v->domain->domain_id, v->vcpu_id);
}
/* Periodic tick timer: send timer event to current domain */
static void t_timer_fn(void *unused)
{
struct vcpu *v = current;
unsigned int cpu = smp_processor_id();
schedule_data[cpu].tick++;
struct timers {
spinlock_t lock;
struct timer **heap;
+ struct timer *running;
} __cacheline_aligned;
struct timers timers[NR_CPUS];
void set_timer(struct timer *timer, s_time_t expires)
{
    int cpu = timer->cpu;
    unsigned long flags;
spin_lock_irqsave(&timers[cpu].lock, flags);
- ASSERT(timer != NULL);
if ( active_timer(timer) )
__stop_timer(timer);
timer->expires = expires;
- __add_timer(timer);
+ if ( likely(!timer->killed) )
+ __add_timer(timer);
spin_unlock_irqrestore(&timers[cpu].lock, flags);
}
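
The killed check closes a re-arm race during teardown. Without it, a self-rearming handler can defeat kill_timer(): the timer is momentarily inactive when kill_timer() dequeues it, but the handler, still running, calls set_timer() and re-inserts a timer whose containing structure is about to be freed. Since both functions serialise on timers[cpu].lock, every interleaving is now harmless (an illustrative trace, not code from the patch):

    /*
     * CPU0 (timer handler)              CPU1 (teardown)
     * --------------------              ---------------
     * fn(data) running ...
     *                                   kill_timer(t): timer->killed = 1
     * set_timer(t, ...): sees
     *   timer->killed and skips
     *   __add_timer() -- a no-op
     *                                   spins until running != t, then frees
     */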
void stop_timer(struct timer *timer)
{
    int cpu = timer->cpu;
    unsigned long flags;
spin_lock_irqsave(&timers[cpu].lock, flags);
- ASSERT(timer != NULL);
if ( active_timer(timer) )
__stop_timer(timer);
spin_unlock_irqrestore(&timers[cpu].lock, flags);
}
+void kill_timer(struct timer *timer)
+{
+ int cpu = timer->cpu;
+ unsigned long flags;
+
+ BUG_ON(timers[cpu].running == timer);
+
+ spin_lock_irqsave(&timers[cpu].lock, flags);
+ if ( active_timer(timer) )
+ __stop_timer(timer);
+ timer->killed = 1;
+ spin_unlock_irqrestore(&timers[cpu].lock, flags);
+
+ for_each_online_cpu ( cpu )
+ while ( timers[cpu].running == timer )
+ cpu_relax();
+}
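
kill_timer() thus provides two guarantees: the timer can never fire again (the killed flag, honoured by set_timer() above), and no handler is still executing when it returns (the spin over each CPU's running pointer). Note the BUG_ON: a handler must not kill its own timer, or the spin could never terminate. A typical caller looks like this (struct foo and poll_timer are hypothetical names):

    struct foo {
        struct timer poll_timer;
        /* ... state that the timer handler dereferences ... */
    };

    static void foo_destroy(struct foo *f)
    {
        kill_timer(&f->poll_timer);  /* handler finished, cannot restart */
        xfree(f);                    /* no use-after-free is possible    */
    }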
+
+
static void timer_softirq_action(void)
{
int cpu = smp_processor_id();
{
remove_entry(heap, t);
+ timers[cpu].running = t;
+
fn = t->function;
data = t->data;
- if ( fn != NULL )
- {
- spin_unlock_irq(&timers[cpu].lock);
- (*fn)(data);
- spin_lock_irq(&timers[cpu].lock);
- }
+ spin_unlock_irq(&timers[cpu].lock);
+ (*fn)(data);
+ spin_lock_irq(&timers[cpu].lock);
/* Heap may have grown while the lock was released. */
heap = timers[cpu].heap;
}
+
+ timers[cpu].running = NULL;
}
while ( !reprogram_timer(GET_HEAP_SIZE(heap) ? heap[1]->expires : 0) );
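
The running pointer is published under timers[cpu].lock before the lock is dropped for the callback, and cleared once the dispatch loop drains; that unlocked window is exactly what kill_timer() polls for. Dropping the fn != NULL test assumes every timer has been given a handler via init_timer(). The handshake in outline (a summary of the code above, not new code):

    /*
     * dispatcher (this CPU)             kill_timer() (any CPU)
     * ---------------------             ----------------------
     * lock; running = t; unlock;
     * (*fn)(data);    <-- no lock held  lock; killed = 1; unlock;
     * lock; ... running = NULL;         while ( running == t ) cpu_relax();
     *
     * The lock cannot be held across the callback (handlers may call
     * set_timer(), which takes the same lock), so "running" is the only
     * state kill_timer() can wait on.
     */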
void *data;
/* Timer-heap offset. */
unsigned int heap_offset;
+ /* Has this timer been killed (cannot be activated)? */
+ int killed;
};
/*
 * Deactivate a timer. This function has no effect if the timer is not
 * currently active.
 */
extern void stop_timer(struct timer *timer);
+/*
+ * Deactivate a timer and prevent it from being re-set (future calls to
+ * set_timer will silently fail). When this function returns it is guaranteed
+ * that the timer callback handler is not running on any CPU.
+ */
+extern void kill_timer(struct timer *timer);
+
/*
* Initialisation. Must be called before any other timer function.
*/