*/
#define __CSFLAG_runq_migrate_request 3
#define CSFLAG_runq_migrate_request (1<<__CSFLAG_runq_migrate_request)
-
+/*
+ * CSFLAG_vcpu_yield: this vcpu was running, and has called vcpu_yield(). The
+ * scheduler is invoked to see if we can give the cpu to someone else, and
+ * get back to the yielding vcpu in a while.
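+ *
+ * The flag is set by csched2_vcpu_yield(), and cleared as soon as the
+ * next scheduling decision for the cpu is taken.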
+ */
+#define __CSFLAG_vcpu_yield 4
+#define CSFLAG_vcpu_yield (1<<__CSFLAG_vcpu_yield)
static unsigned int __read_mostly opt_migrate_resist = 500;
integer_param("sched_credit2_migrate_resist", opt_migrate_resist);
return;
}
+static void
+csched2_vcpu_yield(const struct scheduler *ops, struct vcpu *v)
+{
+ struct csched2_vcpu * const svc = CSCHED2_VCPU(v);
+
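+ /*
+ * All we do here is record the yield request. It is runq_candidate(),
+ * during the next scheduling decision, that tests and clears the flag
+ * and decides whether someone else should run instead.
+ */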
+ __set_bit(__CSFLAG_vcpu_yield, &svc->flags);
+}
+
static void
csched2_context_saved(const struct scheduler *ops, struct vcpu *vc)
{
struct list_head *iter;
struct csched2_vcpu *snext = NULL;
struct csched2_private *prv = CSCHED2_PRIV(per_cpu(scheduler, cpu));
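+ /* Check (and clear) whether scurr has asked to yield the cpu. */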
+ bool yield = __test_and_clear_bit(__CSFLAG_vcpu_yield, &scurr->flags);
*skipped = 0;
- /* Default to current if runnable, idle otherwise */
- if ( vcpu_runnable(scurr->vcpu) )
- snext = scurr;
- else
- snext = CSCHED2_VCPU(idle_vcpu[cpu]);
-
/*
* Return the current vcpu if it has executed for less than ratelimit.
     * Adjustment of the selected vcpu's credit, and the decision of how
     * long it will run, are taken in csched2_runtime.
+ *
+ * Note that, if scurr is yielding, we don't let rate limiting kick in.
+ * In fact, it may be the case that scurr is about to spin, and there's
+ * no point forcing it to do so until rate limiting expires.
*/
- if ( prv->ratelimit_us && !is_idle_vcpu(scurr->vcpu) &&
+ if ( !yield && prv->ratelimit_us && !is_idle_vcpu(scurr->vcpu) &&
vcpu_runnable(scurr->vcpu) &&
(now - scurr->vcpu->runstate.state_entry_time) <
MICROSECS(prv->ratelimit_us) )
return scurr;
+ /* Default to current if runnable, idle otherwise */
+ if ( vcpu_runnable(scurr->vcpu) )
+ snext = scurr;
+ else
+ snext = CSCHED2_VCPU(idle_vcpu[cpu]);
+
list_for_each( iter, &rqd->runq )
{
struct csched2_vcpu * svc = list_entry(iter, struct csched2_vcpu, runq_elem);
continue;
}
- /* If this is on a different processor, don't pull it unless
- * its credit is at least CSCHED2_MIGRATE_RESIST higher. */
+ /*
+ * If this is on a different processor, don't pull it unless
+ * its credit is at least CSCHED2_MIGRATE_RESIST higher.
+ */
if ( svc->vcpu->processor != cpu
&& snext->credit + CSCHED2_MIGRATE_RESIST > svc->credit )
{
continue;
}
- /* If the next one on the list has more credit than current
- * (or idle, if current is not runnable), choose it. */
- if ( svc->credit > snext->credit )
+ /*
+ * If the next one on the list has more credit than current
+ * (or idle, if current is not runnable), or if current is
+ * yielding, choose it.
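+ * In the latter case, scurr gives way even to vcpus with less credit
+ * than its own.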
+ */
+ if ( yield || svc->credit > snext->credit )
snext = svc;
/* In any case, if we got this far, break. */
*/
if ( tasklet_work_scheduled )
{
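+ /*
+ * We are about to run the idle vcpu so that the tasklet work gets
+ * processed, and runq_candidate() is not called: clear any pending
+ * yield request here, so it does not linger until the next
+ * scheduling decision.
+ */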
+ __clear_bit(__CSFLAG_vcpu_yield, &scurr->flags);
 trace_var(TRC_CSCHED2_SCHED_TASKLET, 1, 0, NULL);
snext = CSCHED2_VCPU(idle_vcpu[cpu]);
}
else
.sleep = csched2_vcpu_sleep,
.wake = csched2_vcpu_wake,
+ .yield = csched2_vcpu_yield,
.adjust = csched2_dom_cntl,
.adjust_global = csched2_sys_cntl,