struct list_head runq; /* ordered list of runnable vcpus */
struct list_head depletedq; /* unordered list of depleted vcpus */
- struct timer *repl_timer; /* replenishment timer */
+ struct timer repl_timer; /* replenishment timer */
struct list_head replq; /* ordered list of vcpus that need replenishment */
cpumask_t tickled; /* cpus been tickled */
if ( !list_empty(replq) )
{
struct rt_vcpu *svc_next = replq_elem(replq->next);
- set_timer(prv->repl_timer, svc_next->cur_deadline);
+ set_timer(&prv->repl_timer, svc_next->cur_deadline);
}
else
- stop_timer(prv->repl_timer);
+ stop_timer(&prv->repl_timer);
}
}
* at the front of the event list.
*/
if ( deadline_replq_insert(svc, &svc->replq_elem, replq) )
- set_timer(prv->repl_timer, svc->cur_deadline);
+ set_timer(&prv->repl_timer, svc->cur_deadline);
}
/*
rearm = deadline_replq_insert(svc, &svc->replq_elem, replq);
if ( rearm )
- set_timer(rt_priv(ops)->repl_timer, rearm_svc->cur_deadline);
+ set_timer(&rt_priv(ops)->repl_timer, rearm_svc->cur_deadline);
}
/*
if ( prv == NULL )
goto err;
- prv->repl_timer = xzalloc(struct timer);
- if ( prv->repl_timer == NULL )
- goto err;
-
spin_lock_init(&prv->lock);
INIT_LIST_HEAD(&prv->sdom);
INIT_LIST_HEAD(&prv->runq);
INIT_LIST_HEAD(&prv->depletedq);
INIT_LIST_HEAD(&prv->replq);
- cpumask_clear(&prv->tickled);
-
ops->sched_data = prv;
rc = 0;
err:
- if ( rc && prv )
- {
- xfree(prv->repl_timer);
+ if ( rc )
xfree(prv);
- }
return rc;
}
{
struct rt_private *prv = rt_priv(ops);
- ASSERT(prv->repl_timer->status == TIMER_STATUS_invalid ||
- prv->repl_timer->status == TIMER_STATUS_killed);
- xfree(prv->repl_timer);
+ ASSERT(prv->repl_timer.status == TIMER_STATUS_invalid ||
+ prv->repl_timer.status == TIMER_STATUS_killed);
ops->sched_data = NULL;
xfree(prv);
* TIMER_STATUS_invalid means we are the first cpu that sees the timer
* allocated but not initialized, and so it's up to us to initialize it.
*/
- if ( prv->repl_timer->status == TIMER_STATUS_invalid )
+ if ( prv->repl_timer.status == TIMER_STATUS_invalid )
{
- init_timer(prv->repl_timer, repl_timer_handler, (void*) ops, cpu);
+ init_timer(&prv->repl_timer, repl_timer_handler, (void *)ops, cpu);
dprintk(XENLOG_DEBUG, "RTDS: timer initialized on cpu %u\n", cpu);
}
* removed (in which case we'll see TIMER_STATUS_killed), it's our
* job to (re)initialize the timer.
*/
- if ( prv->repl_timer->status == TIMER_STATUS_invalid ||
- prv->repl_timer->status == TIMER_STATUS_killed )
+ if ( prv->repl_timer.status == TIMER_STATUS_invalid ||
+ prv->repl_timer.status == TIMER_STATUS_killed )
{
- init_timer(prv->repl_timer, repl_timer_handler, (void*) new_ops, cpu);
+ init_timer(&prv->repl_timer, repl_timer_handler, (void *)new_ops, cpu);
dprintk(XENLOG_DEBUG, "RTDS: timer initialized on cpu %u\n", cpu);
}
spin_lock_irqsave(&prv->lock, flags);
- if ( prv->repl_timer->cpu == cpu )
+ if ( prv->repl_timer.cpu == cpu )
{
struct cpupool *c = per_cpu(cpupool, cpu);
unsigned int new_cpu = cpumask_cycle(cpu, cpupool_online_cpumask(c));
*/
if ( new_cpu >= nr_cpu_ids )
{
- kill_timer(prv->repl_timer);
+ kill_timer(&prv->repl_timer);
dprintk(XENLOG_DEBUG, "RTDS: timer killed on cpu %d\n", cpu);
}
else
{
- migrate_timer(prv->repl_timer, new_cpu);
+ migrate_timer(&prv->repl_timer, new_cpu);
}
}
struct rt_private *prv = rt_priv(ops);
struct list_head *replq = rt_replq(ops);
struct list_head *runq = rt_runq(ops);
- struct timer *repl_timer = prv->repl_timer;
struct list_head *iter, *tmp;
struct rt_vcpu *svc;
LIST_HEAD(tmp_replq);
* the one in the front.
*/
if ( !list_empty(replq) )
- set_timer(repl_timer, replq_elem(replq->next)->cur_deadline);
+ set_timer(&prv->repl_timer, replq_elem(replq->next)->cur_deadline);
spin_unlock_irq(&prv->lock);
}
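
Taken together, the hunks above replace the separately-allocated replenishment timer with one embedded directly in struct rt_private. As a reading aid, here is a condensed sketch of the resulting timer lifecycle; it only restates calls already visible in the hunks (Xen's init_timer/set_timer/stop_timer/kill_timer/migrate_timer from xen/timer.h) and is not meant to build on its own.

struct rt_private {
    /* ... other fields as above ... */
    struct timer repl_timer;            /* embedded: no xzalloc()/xfree() */
    /* ... */
};

/*
 * rt_init(): prv is xzalloc()ed, so repl_timer starts out zeroed, i.e.
 * with status TIMER_STATUS_invalid.
 *
 * rt_init_pdata() / rt_switch_sched(): the first pCPU that finds the
 * timer invalid (or killed after an earlier cpupool removal) initializes
 * it on itself:
 *
 *     init_timer(&prv->repl_timer, repl_timer_handler, (void *)ops, cpu);
 *
 * Replenishment-queue maintenance (insert/remove and the handler itself):
 * the timer always tracks the earliest pending replenishment, or is
 * stopped when the queue is empty:
 *
 *     if ( !list_empty(replq) )
 *         set_timer(&prv->repl_timer, replq_elem(replq->next)->cur_deadline);
 *     else
 *         stop_timer(&prv->repl_timer);
 *
 * rt_deinit_pdata(): if the pCPU hosting the timer leaves the pool, the
 * timer is migrate_timer()ed to another online pCPU, or kill_timer()ed
 * when none is left.
 *
 * rt_deinit(): by then the timer must be invalid or killed, so freeing
 * prv frees the timer with it; no separate xfree() is needed.
 */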