From: kaf24@firebug.cl.cam.ac.uk
Date: Fri, 26 Aug 2005 09:29:54 +0000 (+0000)
Subject: Clean up and fix domain_pause and friends. Synchronous
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~16835
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=4fb2acfb09468669ebbeaa6570d42403bb8ffc4f;p=xen.git

Clean up and fix domain_pause and friends. Synchronous
pause should not only wait for the running flag to clear, but
also for the scheduler lock to be released. Also get rid of
some unused sync_lazy_execstate functions.

Signed-off-by: Keir Fraser
---

diff --git a/xen/arch/ia64/linux-xen/irq_ia64.c b/xen/arch/ia64/linux-xen/irq_ia64.c
index 478f3343ee..a3762df227 100644
--- a/xen/arch/ia64/linux-xen/irq_ia64.c
+++ b/xen/arch/ia64/linux-xen/irq_ia64.c
@@ -265,7 +265,7 @@ vmx_ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	 */
 	vmx_irq_exit();
 	if ( wake_dom0 && current != dom0 )
-		domain_wake(dom0->vcpu[0]);
+		vcpu_wake(dom0->vcpu[0]);
 }
 #endif
 
diff --git a/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c b/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c
index 473db52b26..303f7e0d35 100644
--- a/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c
+++ b/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c
@@ -116,7 +116,7 @@
 + */
 +	vmx_irq_exit();
 +	if ( wake_dom0 && current != dom0 )
-+		domain_wake(dom0->vcpu[0]);
++		vcpu_wake(dom0->vcpu[0]);
 +}
 +#endif
 +
diff --git a/xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c b/xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c
index 8d991ddd2c..2e96676814 100644
--- a/xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c
+++ b/xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c
@@ -73,7 +73,7 @@
 +#endif
 +	//FIXME: TEMPORARY HACK!!!!
 +	vcpu_pend_interrupt(dom0->vcpu[0],vector);
-+	domain_wake(dom0->vcpu[0]);
++	vcpu_wake(dom0->vcpu[0]);
 +	}
 +	else
 +#endif
diff --git a/xen/arch/ia64/patch/linux-2.6.7/time.c b/xen/arch/ia64/patch/linux-2.6.7/time.c
index 1b6263ce4f..27b01da8c2 100644
--- a/xen/arch/ia64/patch/linux-2.6.7/time.c
+++ b/xen/arch/ia64/patch/linux-2.6.7/time.c
@@ -209,14 +209,14 @@
 +	if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) {
 +		vcpu_pend_timer(dom0->vcpu[0]);
 +		//vcpu_set_next_timer(dom0->vcpu[0]);
-+		domain_wake(dom0->vcpu[0]);
++		vcpu_wake(dom0->vcpu[0]);
 +	}
 +	if (!is_idle_task(current->domain) && current->domain != dom0) {
 +		if (vcpu_timer_expired(current)) {
 +			vcpu_pend_timer(current);
 +			// ensure another timer interrupt happens even if domain doesn't
 +			vcpu_set_next_timer(current);
-+			domain_wake(current);
++			vcpu_wake(current);
 +		}
 +	}
 +	raise_actimer_softirq();
diff --git a/xen/arch/ia64/xenirq.c b/xen/arch/ia64/xenirq.c
index 5bf09171c8..296cd3267b 100644
--- a/xen/arch/ia64/xenirq.c
+++ b/xen/arch/ia64/xenirq.c
@@ -50,7 +50,7 @@ xen_do_IRQ(ia64_vector vector)
 #endif
 		//FIXME: TEMPORARY HACK!!!!
 		vcpu_pend_interrupt(dom0->vcpu[0],vector);
-		domain_wake(dom0->vcpu[0]);
+		vcpu_wake(dom0->vcpu[0]);
 		return(1);
 	}
 	return(0);
diff --git a/xen/arch/ia64/xenmisc.c b/xen/arch/ia64/xenmisc.c
index 2fc45e85db..c76a93eca8 100644
--- a/xen/arch/ia64/xenmisc.c
+++ b/xen/arch/ia64/xenmisc.c
@@ -59,8 +59,6 @@ platform_is_hp_ski(void)
 
 /* calls in xen/common code that are unused on ia64 */
 void sync_lazy_execstate_cpu(unsigned int cpu) {}
-void sync_lazy_execstate_mask(cpumask_t mask) {}
-void sync_lazy_execstate_all(void) {}
 
 #ifdef CONFIG_VTI
 int grant_table_create(struct domain *d) { return 0; }
diff --git a/xen/arch/ia64/xentime.c b/xen/arch/ia64/xentime.c
index 25b220cbf9..843b9f3315 100644
--- a/xen/arch/ia64/xentime.c
+++ b/xen/arch/ia64/xentime.c
@@ -162,14 +162,14 @@ xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 	if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) {
 		vcpu_pend_timer(dom0->vcpu[0]);
 		//vcpu_set_next_timer(dom0->vcpu[0]);
-		domain_wake(dom0->vcpu[0]);
+		vcpu_wake(dom0->vcpu[0]);
 	}
 	if (!is_idle_task(current->domain) && current->domain != dom0) {
 		if (vcpu_timer_expired(current)) {
 			vcpu_pend_timer(current);
 			// ensure another timer interrupt happens even if domain doesn't
 			vcpu_set_next_timer(current);
-			domain_wake(current);
+			vcpu_wake(current);
 		}
 	}
 	raise_actimer_softirq();
diff --git a/xen/arch/x86/audit.c b/xen/arch/x86/audit.c
index fee6312155..97e48ef77c 100644
--- a/xen/arch/x86/audit.c
+++ b/xen/arch/x86/audit.c
@@ -735,7 +735,6 @@ void _audit_domain(struct domain *d, int flags)
 
     if ( d != current->domain )
         domain_pause(d);
-    sync_lazy_execstate_all();
 
     // Maybe we should just be using BIGLOCK?
     //
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 4f4187dda7..db1d630e64 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -888,24 +888,14 @@ int __sync_lazy_execstate(void)
 void sync_lazy_execstate_cpu(unsigned int cpu)
 {
     if ( cpu == smp_processor_id() )
+    {
         (void)__sync_lazy_execstate();
+    }
     else
+    {
+        /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
         flush_tlb_mask(cpumask_of_cpu(cpu));
-}
-
-void sync_lazy_execstate_mask(cpumask_t mask)
-{
-    if ( cpu_isset(smp_processor_id(), mask) )
-        (void)__sync_lazy_execstate();
-    /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
-    flush_tlb_mask(mask);
-}
-
-void sync_lazy_execstate_all(void)
-{
-    __sync_lazy_execstate();
-    /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
-    flush_tlb_mask(cpu_online_map);
+    }
 }
 
 unsigned long __hypercall_create_continuation(
diff --git a/xen/common/domain.c b/xen/common/domain.c
index b939d90f77..713e5bb3ff 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -152,10 +152,7 @@ static void domain_shutdown_finalise(void)
 
     /* Make sure that every vcpu is descheduled before we finalise. */
     for_each_vcpu ( d, v )
-        while ( test_bit(_VCPUF_running, &v->vcpu_flags) )
-            cpu_relax();
-
-    sync_lazy_execstate_mask(d->cpumask);
+        vcpu_sleep_sync(v);
     BUG_ON(!cpus_empty(d->cpumask));
 
     sync_pagetable_state(d);
@@ -209,7 +206,7 @@ void domain_shutdown(u8 reason)
 
     /* Put every vcpu to sleep, but don't wait (avoids inter-vcpu deadlock). */
     for_each_vcpu ( d, v )
-        domain_sleep_nosync(v);
+        vcpu_sleep_nosync(v);
 }
 
@@ -226,7 +223,7 @@ void domain_pause_for_debugger(void)
     for_each_vcpu ( d, v )
     {
         set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);
-        domain_sleep_nosync(v);
+        vcpu_sleep_nosync(v);
     }
 
     send_guest_virq(dom0->vcpu[0], VIRQ_DEBUGGER);
@@ -275,7 +272,7 @@ void vcpu_pause(struct vcpu *v)
 {
     BUG_ON(v == current);
     atomic_inc(&v->pausecnt);
-    domain_sleep_sync(v);
+    vcpu_sleep_sync(v);
 }
 
 void domain_pause(struct domain *d)
@@ -286,7 +283,7 @@ void domain_pause(struct domain *d)
     {
         BUG_ON(v == current);
         atomic_inc(&v->pausecnt);
-        domain_sleep_sync(v);
+        vcpu_sleep_sync(v);
     }
 }
 
@@ -294,7 +291,7 @@ void vcpu_unpause(struct vcpu *v)
 {
     BUG_ON(v == current);
     if ( atomic_dec_and_test(&v->pausecnt) )
-        domain_wake(v);
+        vcpu_wake(v);
 }
 
 void domain_unpause(struct domain *d)
@@ -313,7 +310,7 @@ void domain_pause_by_systemcontroller(struct domain *d)
     {
         BUG_ON(v == current);
         if ( !test_and_set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
-            domain_sleep_sync(v);
+            vcpu_sleep_sync(v);
     }
 }
 
@@ -324,7 +321,7 @@ void domain_unpause_by_systemcontroller(struct domain *d)
     for_each_vcpu ( d, v )
     {
         if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
-            domain_wake(v);
+            vcpu_wake(v);
     }
 }
 
@@ -413,7 +410,7 @@ long do_boot_vcpu(unsigned long vcpu, struct vcpu_guest_context *ctxt)
 
     /* domain_unpause_by_systemcontroller */
     if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
-        domain_wake(v);
+        vcpu_wake(v);
 
     xfree(c);
     return 0;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 1a8eb12aa0..3cec109727 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -193,7 +193,7 @@ void sched_rem_domain(struct vcpu *v)
     TRACE_2D(TRC_SCHED_DOM_REM, v->domain->domain_id, v->vcpu_id);
 }
 
-void domain_sleep_nosync(struct vcpu *v)
+void vcpu_sleep_nosync(struct vcpu *v)
 {
     unsigned long flags;
 
@@ -205,18 +205,25 @@ void domain_sleep_nosync(struct vcpu *v)
     TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
 }
 
-void domain_sleep_sync(struct vcpu *v)
+void vcpu_sleep_sync(struct vcpu *v)
 {
-    domain_sleep_nosync(v);
-
-    while ( test_bit(_VCPUF_running, &v->vcpu_flags) && !domain_runnable(v) )
+    vcpu_sleep_nosync(v);
+
+    /*
+     * We can be sure that the VCPU is finally descheduled after the running
+     * flag is cleared and the scheduler lock is released.
+     */
+    while ( test_bit(_VCPUF_running, &v->vcpu_flags)
+            && !domain_runnable(v)
+            && spin_is_locked(&schedule_data[v->processor].schedule_lock) )
         cpu_relax();
 
+    /* Counteract lazy context switching. */
     if ( cpu_isset(v->processor, v->domain->cpumask) )
         sync_lazy_execstate_cpu(v->processor);
 }
 
-void domain_wake(struct vcpu *v)
+void vcpu_wake(struct vcpu *v)
 {
     unsigned long flags;
 
@@ -293,7 +300,7 @@ static long do_vcpu_up(int vcpu)
         return -ESRCH;
     clear_bit(_VCPUF_down, &target->vcpu_flags);
     /* wake vcpu */
-    domain_wake(target);
+    vcpu_wake(target);
 
     return 0;
 }
@@ -457,10 +464,10 @@ long sched_adjdom(struct sched_adjdom_cmd *cmd)
                 }
             }
         }
-    } while (!succ);
-    //spin_lock_irq(&schedule_data[d->vcpu[0]->processor].schedule_lock);
+    } while ( !succ );
+
     SCHED_OP(adjdom, d, cmd);
-    //spin_unlock_irq(&schedule_data[d->vcpu[0]->processor].schedule_lock);
+
     for (cpu = 0; cpu < NR_CPUS; cpu++)
         if (__get_cpu_bit(cpu, have_lock))
             spin_unlock(&schedule_data[cpu].schedule_lock);
@@ -520,7 +527,8 @@ static void __enter_scheduler(void)
     perfc_incrc(sched_ctx);
 
 #if defined(WAKE_HISTO)
-    if ( !is_idle_task(next->domain) && next->wokenup ) {
+    if ( !is_idle_task(next->domain) && next->wokenup )
+    {
         ulong diff = (ulong)(now - next->wokenup);
         diff /= (ulong)MILLISECS(1);
         if (diff <= BUCKETS-2) schedule_data[cpu].hist[diff]++;
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 9147b631bd..adab08a3ee 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -370,6 +370,8 @@ int new_guest_cr3(unsigned long pfn);
 
 void propagate_page_fault(unsigned long addr, u16 error_code);
 
+extern int __sync_lazy_execstate(void);
+
 /*
  * Caller must own d's BIGLOCK, is responsible for flushing the TLB, and must
  * hold a reference to the page.
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index e0ea2f756d..2d93f7576d 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -245,18 +245,15 @@ void sched_rem_domain(struct vcpu *);
 long sched_ctl(struct sched_ctl_cmd *);
 long sched_adjdom(struct sched_adjdom_cmd *);
 int sched_id();
-void domain_wake(struct vcpu *d);
-void domain_sleep_nosync(struct vcpu *d);
-void domain_sleep_sync(struct vcpu *d);
+void vcpu_wake(struct vcpu *d);
+void vcpu_sleep_nosync(struct vcpu *d);
+void vcpu_sleep_sync(struct vcpu *d);
 
 /*
- * Force loading of currently-executing domain state on the specified set
- * of CPUs. This is used to counteract lazy state switching where required.
+ * Force loading of currently-executing domain state on the specified CPU.
+ * This is used to counteract lazy state switching where required.
  */
 extern void sync_lazy_execstate_cpu(unsigned int cpu);
-extern void sync_lazy_execstate_mask(cpumask_t mask);
-extern void sync_lazy_execstate_all(void);
-extern int __sync_lazy_execstate(void);
 
 /*
  * Called by the scheduler to switch to another VCPU. On entry, although
@@ -268,7 +265,7 @@ extern int __sync_lazy_execstate(void);
  * The callee must ensure that the local CPU is no longer running in @prev's
  * context, and that the context is saved to memory, before returning.
  * Alternatively, if implementing lazy context switching, it suffices to ensure
- * that invoking __sync_lazy_execstate() will switch and commit @prev's state.
+ * that invoking sync_lazy_execstate() will switch and commit @prev's state.
  */
 extern void context_switch(
     struct vcpu *prev,
@@ -287,7 +284,8 @@ extern void context_switch_finalise(
 extern void continue_running(
     struct vcpu *same);
 
-int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */
+/* Is CPU 'cpu' idle right now? */
+int idle_cpu(int cpu);
 
 void startup_cpu_idle_loop(void);
 
@@ -410,7 +408,7 @@ void cpu_init(void);
 static inline void vcpu_unblock(struct vcpu *v)
 {
     if ( test_and_clear_bit(_VCPUF_blocked, &v->vcpu_flags) )
-        domain_wake(v);
+        vcpu_wake(v);
 }
 
 #define IS_PRIV(_d) \
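
For readers outside the Xen tree, the standalone C sketch below models the
synchronization that the commit message describes for vcpu_sleep_sync(): a
pauser must not treat a VCPU as descheduled when the running flag clears,
because the scheduler clears that flag while still inside its locked critical
section; it must also wait for the scheduler lock to be released. This is a
simplified model, not Xen code: vcpu_running, sched_locked and
scheduler_thread are illustrative stand-ins for the _VCPUF_running flag, the
per-CPU schedule_lock and the scheduler path, and the !domain_runnable()
check in the real wait condition is folded away.

/*
 * Model of the vcpu_sleep_sync() wait introduced above. Not Xen code;
 * all names are illustrative. Build with: cc -std=c11 -pthread
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool vcpu_running = true;   /* stand-in for _VCPUF_running  */
static atomic_bool sched_locked = false;  /* stand-in for schedule_lock   */

/*
 * Scheduler side: the running flag is cleared *inside* the locked
 * section, so the flag alone does not prove the context switch is done.
 */
static void *scheduler_thread(void *arg)
{
    (void)arg;
    atomic_store(&sched_locked, true);    /* spin_lock(schedule_lock)     */
    atomic_store(&vcpu_running, false);   /* deschedule the VCPU          */
    /* ... remaining context-switch state is committed here ...           */
    atomic_store(&sched_locked, false);   /* spin_unlock: state now final */
    return NULL;
}

int main(void)
{
    pthread_t sched;
    pthread_create(&sched, NULL, scheduler_thread, NULL);

    /*
     * Pauser side, mirroring the intent of vcpu_sleep_sync(): spin until
     * the running flag is clear AND the scheduler has dropped its lock.
     */
    while (atomic_load(&vcpu_running) || atomic_load(&sched_locked))
        ;  /* spin; Xen uses cpu_relax() here */

    puts("vcpu fully descheduled: flag clear, scheduler lock released");
    pthread_join(sched, NULL);
    return 0;
}

In the patch itself the wait is one while loop over test_bit(_VCPUF_running,
...), !domain_runnable(v) and spin_is_locked(...) with cpu_relax() in the
body; the model above captures only the two-part "flag clear and lock
released" ordering that the commit message calls out, followed in the real
code by sync_lazy_execstate_cpu() to counteract lazy context switching.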