static void
csched_free_vdata(const struct scheduler *ops, void *priv)
{
-    struct csched_private *prv = CSCHED_PRIV(ops);
    struct csched_vcpu *svc = priv;
-    unsigned long flags;
-
-    if ( __vcpu_on_runq(svc) )
-        __runq_remove(svc);
-
-    spin_lock_irqsave(&(prv->lock), flags);
-    if ( !list_empty(&svc->active_vcpu_elem) )
-        __csched_vcpu_acct_stop_locked(prv, svc);
-
-    spin_unlock_irqrestore(&(prv->lock), flags);
+    BUG_ON( !list_empty(&svc->runq_elem) );
    xfree(svc);
}

static void
-csched_vcpu_destroy(const struct scheduler *ops, struct vcpu *vc)
+csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
{
+    struct csched_private *prv = CSCHED_PRIV(ops);
    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
    struct csched_dom * const sdom = svc->sdom;
+    unsigned long flags;

    CSCHED_STAT_CRANK(vcpu_destroy);

+    if ( __vcpu_on_runq(svc) )
+        __runq_remove(svc);
+
+    spin_lock_irqsave(&(prv->lock), flags);
+
+    if ( !list_empty(&svc->active_vcpu_elem) )
+        __csched_vcpu_acct_stop_locked(prv, svc);
+
+    spin_unlock_irqrestore(&(prv->lock), flags);
+
    BUG_ON( sdom == NULL );
    BUG_ON( !list_empty(&svc->runq_elem) );
-
-    csched_free_vdata(ops, svc);
}

static void
    .destroy_domain = csched_dom_destroy,

    .insert_vcpu    = csched_vcpu_insert,
-    .destroy_vcpu   = csched_vcpu_destroy,
+    .remove_vcpu    = csched_vcpu_remove,

    .sleep          = csched_vcpu_sleep,
    .wake           = csched_vcpu_wake,
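Taken together, the two sched_credit.c hunks above split the old csched_vcpu_destroy into a deactivation hook and a pure free. Reconstructed from the context and '+' lines (blank-line placement approximate), the credit code now reads:

    static void
    csched_free_vdata(const struct scheduler *ops, void *priv)
    {
        struct csched_vcpu *svc = priv;

        /* remove_vcpu must already have dequeued us. */
        BUG_ON( !list_empty(&svc->runq_elem) );
        xfree(svc);
    }

    static void
    csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
    {
        struct csched_private *prv = CSCHED_PRIV(ops);
        struct csched_vcpu * const svc = CSCHED_VCPU(vc);
        struct csched_dom * const sdom = svc->sdom;
        unsigned long flags;

        CSCHED_STAT_CRANK(vcpu_destroy);

        /* Dequeue first, then stop credit accounting under the scheduler lock. */
        if ( __vcpu_on_runq(svc) )
            __runq_remove(svc);

        spin_lock_irqsave(&(prv->lock), flags);
        if ( !list_empty(&svc->active_vcpu_elem) )
            __csched_vcpu_acct_stop_locked(prv, svc);
        spin_unlock_irqrestore(&(prv->lock), flags);

        BUG_ON( sdom == NULL );
        BUG_ON( !list_empty(&svc->runq_elem) );
    }

All deactivation work now lives in remove_vcpu, so free_vdata may assume the vcpu is off the runqueue and only has to release the memory.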
csched_free_vdata(const struct scheduler *ops, void *priv)
{
    struct csched_vcpu *svc = priv;
-    struct vcpu *vc = svc->vcpu;
+
+    xfree(svc);
+}
+
+static void
+csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
+{
+    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_dom * const sdom = svc->sdom;
+
+    BUG_ON( sdom == NULL );
+    BUG_ON( !list_empty(&svc->runq_elem) );
+
    if ( ! is_idle_vcpu(vc) )
    {
        svc->sdom->nr_vcpus--;
    }
-
-    xfree(svc);
-}
-
-static void
-csched_vcpu_destroy(const struct scheduler *ops, struct vcpu *vc)
-{
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
-    struct csched_dom * const sdom = svc->sdom;
-
-    BUG_ON( sdom == NULL );
-    BUG_ON( !list_empty(&svc->runq_elem) );
-
-    csched_free_vdata(ops, svc);
}

static void
    .destroy_domain = csched_dom_destroy,

    .insert_vcpu    = csched_vcpu_insert,
-    .destroy_vcpu   = csched_vcpu_destroy,
+    .remove_vcpu    = csched_vcpu_remove,

    .sleep          = csched_vcpu_sleep,
    .wake           = csched_vcpu_wake,
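The credit2 copy gets the same treatment: csched_free_vdata shrinks to a bare xfree, while the nr_vcpus bookkeeping for non-idle vcpus moves into the new csched_vcpu_remove (note how the unchanged "if ( ! is_idle_vcpu(vc) )" block is shared context between the removed and added function bodies). The now-unused vc local in free_vdata is dropped along the way, so freeing per-vcpu data no longer touches any domain state.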
    xfree(priv);
}

-static void sedf_destroy_vcpu(const struct scheduler *ops, struct vcpu *v)
-{
-    sedf_free_vdata(ops, v->sched_priv);
-}
-
static void *
sedf_alloc_domdata(const struct scheduler *ops, struct domain *d)
{
    .init_domain     = sedf_init_domain,
    .destroy_domain  = sedf_destroy_domain,

-    .destroy_vcpu    = sedf_destroy_vcpu,
-
    .alloc_vdata     = sedf_alloc_vdata,
    .free_vdata      = sedf_free_vdata,
    .alloc_pdata     = sedf_alloc_pdata,
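sedf, by contrast, needs no remove_vcpu at all: its old destroy_vcpu only forwarded to sedf_free_vdata, which schedule.c now calls directly, and sedf keeps no per-vcpu run state outside the vdata. Leaving the hook unset is safe because the SCHED_OP dispatcher in schedule.c only invokes hooks that are non-NULL; it has roughly the following shape (an approximation for illustration, not quoted from this patch):

    #define SCHED_OP(opsptr, fn, ...)                                          \
             ( ((opsptr)->fn != NULL) ? (opsptr)->fn(opsptr, ##__VA_ARGS__)    \
               : (typeof((opsptr)->fn(opsptr, ##__VA_ARGS__)))0 )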
    if ( v->sched_priv == NULL )
        return 1;

+    SCHED_OP(VCPU2OP(v), insert_vcpu, v);
+
    return 0;
}
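Calling insert_vcpu at the end of sched_init_vcpu makes insertion part of vcpu bring-up, so insert_vcpu/remove_vcpu become a symmetric activate/deactivate pair over the vcpu's lifetime: activated when the vcpu is created or enters a pool, deactivated when it is destroyed or leaves one (cf. the new comment in the sched-if.h hunk below).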
        migrate_timer(&v->singleshot_timer, new_p);
        migrate_timer(&v->poll_timer, new_p);

-        SCHED_OP(VCPU2OP(v), destroy_vcpu, v);
+        SCHED_OP(VCPU2OP(v), remove_vcpu, v);
+        SCHED_OP(VCPU2OP(v), free_vdata, v->sched_priv);

        cpus_setall(v->cpu_affinity);
        v->processor = new_p;
        evtchn_move_pirqs(v);

        new_p = cycle_cpu(new_p, c->cpu_valid);
+
+        SCHED_OP(VCPU2OP(v), insert_vcpu, v);
    }

    domain_update_node_affinity(d);
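The domain-move hunk shows why the split matters: the vcpu is deactivated and its per-vcpu data freed before it is re-homed, and only then inserted again. The fresh vdata for the target pool is presumably allocated and assigned to v->sched_priv outside the context shown. Schematically (old_ops, new_ops and domdata are illustrative names, not from the patch):

    SCHED_OP(old_ops, remove_vcpu, v);              /* deactivate            */
    SCHED_OP(old_ops, free_vdata, v->sched_priv);   /* release old vdata     */
    v->sched_priv = SCHED_OP(new_ops, alloc_vdata, v, domdata);
    SCHED_OP(new_ops, insert_vcpu, v);              /* activate in new pool  */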
    kill_timer(&v->poll_timer);
    if ( test_and_clear_bool(v->is_urgent) )
        atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
-    SCHED_OP(VCPU2OP(v), destroy_vcpu, v);
+    SCHED_OP(VCPU2OP(v), remove_vcpu, v);
+    SCHED_OP(VCPU2OP(v), free_vdata, v->sched_priv);
}

int sched_init_domain(struct domain *d)
    int          (*init_domain)    (const struct scheduler *, struct domain *);
    void         (*destroy_domain) (const struct scheduler *, struct domain *);

+    /* Activate / deactivate vcpus in a cpu pool */
    void         (*insert_vcpu)    (const struct scheduler *, struct vcpu *);
-    void         (*destroy_vcpu)   (const struct scheduler *, struct vcpu *);
+    void         (*remove_vcpu)    (const struct scheduler *, struct vcpu *);

    void         (*sleep)          (const struct scheduler *, struct vcpu *);
    void         (*wake)           (const struct scheduler *, struct vcpu *);
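The renamed hook pair thus carries a contract: remove_vcpu must leave the vcpu dequeued with its accounting stopped, because free_vdata may run immediately afterwards and (per the credit scheduler's BUG_ON) may assume the vcpu is on no runqueue; conversely, free_vdata must do nothing but release memory, since the move path above also calls it for a vcpu that stays alive and is reinserted into another pool.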