*/
vmx_irq_exit();
if ( wake_dom0 && current != dom0 )
- domain_wake(dom0->vcpu[0]);
+ vcpu_wake(dom0->vcpu[0]);
}
#endif
+ */
+ vmx_irq_exit();
+ if ( wake_dom0 && current != dom0 )
-+ domain_wake(dom0->vcpu[0]);
++ vcpu_wake(dom0->vcpu[0]);
+}
+#endif
+
+#endif
+ //FIXME: TEMPORARY HACK!!!!
+ vcpu_pend_interrupt(dom0->vcpu[0],vector);
-+ domain_wake(dom0->vcpu[0]);
++ vcpu_wake(dom0->vcpu[0]);
+ }
+ else
+#endif
+ if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) {
+ vcpu_pend_timer(dom0->vcpu[0]);
+ //vcpu_set_next_timer(dom0->vcpu[0]);
-+ domain_wake(dom0->vcpu[0]);
++ vcpu_wake(dom0->vcpu[0]);
+ }
+ if (!is_idle_task(current->domain) && current->domain != dom0) {
+ if (vcpu_timer_expired(current)) {
+ vcpu_pend_timer(current);
+ // ensure another timer interrupt happens even if domain doesn't
+ vcpu_set_next_timer(current);
-+ domain_wake(current);
++ vcpu_wake(current);
+ }
+ }
+ raise_actimer_softirq();
#endif
//FIXME: TEMPORARY HACK!!!!
vcpu_pend_interrupt(dom0->vcpu[0],vector);
- domain_wake(dom0->vcpu[0]);
+ vcpu_wake(dom0->vcpu[0]);
return(1);
}
return(0);
/* calls in xen/common code that are unused on ia64 */
void sync_lazy_execstate_cpu(unsigned int cpu) {}
-void sync_lazy_execstate_mask(cpumask_t mask) {}
-void sync_lazy_execstate_all(void) {}
#ifdef CONFIG_VTI
int grant_table_create(struct domain *d) { return 0; }
if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) {
vcpu_pend_timer(dom0->vcpu[0]);
//vcpu_set_next_timer(dom0->vcpu[0]);
- domain_wake(dom0->vcpu[0]);
+ vcpu_wake(dom0->vcpu[0]);
}
if (!is_idle_task(current->domain) && current->domain != dom0) {
if (vcpu_timer_expired(current)) {
vcpu_pend_timer(current);
// ensure another timer interrupt happens even if domain doesn't
vcpu_set_next_timer(current);
- domain_wake(current);
+ vcpu_wake(current);
}
}
raise_actimer_softirq();
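
As a reading aid only (not new behaviour introduced here), the non-dom0 branch of the tick handling above reduces to this per-vcpu pattern:

    if (vcpu_timer_expired(current)) {
        vcpu_pend_timer(current);      // mark the virtual timer interrupt pending
        vcpu_set_next_timer(current);  // re-arm so another tick arrives even if
                                       // the guest never programs its own timer
        vcpu_wake(current);            // make the vcpu runnable so it can take it
    }
    raise_actimer_softirq();           // defer ac_timer processing to softirq context
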
if ( d != current->domain )
domain_pause(d);
- sync_lazy_execstate_all();
// Maybe we should just be using BIGLOCK?
//
void sync_lazy_execstate_cpu(unsigned int cpu)
{
if ( cpu == smp_processor_id() )
+ {
(void)__sync_lazy_execstate();
+ }
else
+ {
+ /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
flush_tlb_mask(cpumask_of_cpu(cpu));
-}
-
-void sync_lazy_execstate_mask(cpumask_t mask)
-{
- if ( cpu_isset(smp_processor_id(), mask) )
- (void)__sync_lazy_execstate();
- /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
- flush_tlb_mask(mask);
-}
-
-void sync_lazy_execstate_all(void)
-{
- __sync_lazy_execstate();
- /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
- flush_tlb_mask(cpu_online_map);
+ }
}
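
The comment above assumes that the flush IPI path on the remote CPU also commits any lazily-held state. That handler is not part of this patch; as a rough sketch only (the function name below is illustrative, not a real symbol), the remote side is expected to do something like:

    static void flush_ipi_handler(void)     /* hypothetical name */
    {
        (void)__sync_lazy_execstate();      /* commit lazily-held vcpu state first */
        local_flush_tlb();                  /* then perform the requested flush */
    }
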
unsigned long __hypercall_create_continuation(
/* Make sure that every vcpu is descheduled before we finalise. */
for_each_vcpu ( d, v )
- while ( test_bit(_VCPUF_running, &v->vcpu_flags) )
- cpu_relax();
-
- sync_lazy_execstate_mask(d->cpumask);
+ vcpu_sleep_sync(v);
BUG_ON(!cpus_empty(d->cpumask));
sync_pagetable_state(d);
/* Put every vcpu to sleep, but don't wait (avoids inter-vcpu deadlock). */
for_each_vcpu ( d, v )
- domain_sleep_nosync(v);
+ vcpu_sleep_nosync(v);
}
for_each_vcpu ( d, v )
{
set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);
- domain_sleep_nosync(v);
+ vcpu_sleep_nosync(v);
}
send_guest_virq(dom0->vcpu[0], VIRQ_DEBUGGER);
{
BUG_ON(v == current);
atomic_inc(&v->pausecnt);
- domain_sleep_sync(v);
+ vcpu_sleep_sync(v);
}
void domain_pause(struct domain *d)
{
BUG_ON(v == current);
atomic_inc(&v->pausecnt);
- domain_sleep_sync(v);
+ vcpu_sleep_sync(v);
}
}
{
BUG_ON(v == current);
if ( atomic_dec_and_test(&v->pausecnt) )
- domain_wake(v);
+ vcpu_wake(v);
}
void domain_unpause(struct domain *d)
{
BUG_ON(v == current);
if ( !test_and_set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
- domain_sleep_sync(v);
+ vcpu_sleep_sync(v);
}
}
for_each_vcpu ( d, v )
{
if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
- domain_wake(v);
+ vcpu_wake(v);
}
}
/* domain_unpause_by_systemcontroller */
if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
- domain_wake(v);
+ vcpu_wake(v);
xfree(c);
return 0;
TRACE_2D(TRC_SCHED_DOM_REM, v->domain->domain_id, v->vcpu_id);
}
-void domain_sleep_nosync(struct vcpu *v)
+void vcpu_sleep_nosync(struct vcpu *v)
{
unsigned long flags;
TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
}
-void domain_sleep_sync(struct vcpu *v)
+void vcpu_sleep_sync(struct vcpu *v)
{
- domain_sleep_nosync(v);
-
- while ( test_bit(_VCPUF_running, &v->vcpu_flags) && !domain_runnable(v) )
+ vcpu_sleep_nosync(v);
+
+ /*
+ * We can be sure that the VCPU is finally descheduled after the running
+ * flag is cleared and the scheduler lock is released.
+ */
+ while ( test_bit(_VCPUF_running, &v->vcpu_flags)
+ && !domain_runnable(v)
+ && spin_is_locked(&schedule_data[v->processor].schedule_lock) )
cpu_relax();
+ /* Counteract lazy context switching. */
if ( cpu_isset(v->processor, v->domain->cpumask) )
sync_lazy_execstate_cpu(v->processor);
}
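
For reference, the caller-side pattern vcpu_sleep_sync() is designed for mirrors vcpu_pause() elsewhere in this patch:

    atomic_inc(&v->pausecnt);   /* make v non-runnable from the scheduler's view */
    vcpu_sleep_sync(v);         /* returns only once v is fully descheduled and
                                 * any lazily-switched state has been committed */
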
-void domain_wake(struct vcpu *v)
+void vcpu_wake(struct vcpu *v)
{
unsigned long flags;
return -ESRCH;
clear_bit(_VCPUF_down, &target->vcpu_flags);
/* wake vcpu */
- domain_wake(target);
+ vcpu_wake(target);
return 0;
}
}
}
}
- } while (!succ);
- //spin_lock_irq(&schedule_data[d->vcpu[0]->processor].schedule_lock);
+ } while ( !succ );
+
SCHED_OP(adjdom, d, cmd);
- //spin_unlock_irq(&schedule_data[d->vcpu[0]->processor].schedule_lock);
+
for (cpu = 0; cpu < NR_CPUS; cpu++)
if (__get_cpu_bit(cpu, have_lock))
spin_unlock(&schedule_data[cpu].schedule_lock);
perfc_incrc(sched_ctx);
#if defined(WAKE_HISTO)
- if ( !is_idle_task(next->domain) && next->wokenup ) {
+ if ( !is_idle_task(next->domain) && next->wokenup )
+ {
ulong diff = (ulong)(now - next->wokenup);
diff /= (ulong)MILLISECS(1);
if (diff <= BUCKETS-2) schedule_data[cpu].hist[diff]++;
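
Put plainly (a reading of the WAKE_HISTO accounting above; the overflow handling is assumed, since it falls outside the context shown here):

    /* idx = (now - next->wokenup) / MILLISECS(1);
     * hist[idx] counts dispatches that ran within idx..idx+1 ms of wakeup;
     * latencies above BUCKETS-2 ms presumably fall into a final overflow
     * bucket in the else branch not shown in this hunk. */
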
void propagate_page_fault(unsigned long addr, u16 error_code);
+extern int __sync_lazy_execstate(void);
+
/*
* Caller must own d's BIGLOCK, is responsible for flushing the TLB, and must
* hold a reference to the page.
long sched_ctl(struct sched_ctl_cmd *);
long sched_adjdom(struct sched_adjdom_cmd *);
int sched_id();
-void domain_wake(struct vcpu *d);
-void domain_sleep_nosync(struct vcpu *d);
-void domain_sleep_sync(struct vcpu *d);
+void vcpu_wake(struct vcpu *d);
+void vcpu_sleep_nosync(struct vcpu *d);
+void vcpu_sleep_sync(struct vcpu *d);
/*
- * Force loading of currently-executing domain state on the specified set
- * of CPUs. This is used to counteract lazy state switching where required.
+ * Force loading of currently-executing domain state on the specified CPU.
+ * This is used to counteract lazy state switching where required.
*/
extern void sync_lazy_execstate_cpu(unsigned int cpu);
-extern void sync_lazy_execstate_mask(cpumask_t mask);
-extern void sync_lazy_execstate_all(void);
-extern int __sync_lazy_execstate(void);
/*
* Called by the scheduler to switch to another VCPU. On entry, although
* The callee must ensure that the local CPU is no longer running in @prev's
* context, and that the context is saved to memory, before returning.
* Alternatively, if implementing lazy context switching, it suffices to ensure
- * that invoking __sync_lazy_execstate() will switch and commit @prev's state.
+ * that invoking sync_lazy_execstate() will switch and commit @prev's state.
*/
extern void context_switch(
struct vcpu *prev,
extern void continue_running(
struct vcpu *same);
-int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */
+/* Is CPU 'cpu' idle right now? */
+int idle_cpu(int cpu);
void startup_cpu_idle_loop(void);
static inline void vcpu_unblock(struct vcpu *v)
{
if ( test_and_clear_bit(_VCPUF_blocked, &v->vcpu_flags) )
- domain_wake(v);
+ vcpu_wake(v);
}
#define IS_PRIV(_d) \