From ef4c6b079cc55edf5b82cb5576cbe571df7d71a8 Mon Sep 17 00:00:00 2001
From: Keir Fraser
Date: Thu, 4 Sep 2008 14:38:26 +0100
Subject: [PATCH] More efficient implementation of SCHEDOP_poll when polling a
 single port.

Signed-off-by: Keir Fraser
---
 xen/common/domain.c        |  4 ++-
 xen/common/event_channel.c | 19 ++++++++-----
 xen/common/schedule.c      | 55 +++++++++++++++++++++++++++++++-------
 xen/include/xen/sched.h    | 21 ++++++++-------
 4 files changed, 71 insertions(+), 28 deletions(-)

diff --git a/xen/common/domain.c b/xen/common/domain.c
index 6bf1f49161..353242dc2c 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -651,9 +651,11 @@ void vcpu_reset(struct vcpu *v)
 
     set_bit(_VPF_down, &v->pause_flags);
 
+    clear_bit(v->vcpu_id, d->poll_mask);
+    v->poll_evtchn = 0;
+
     v->fpu_initialised = 0;
     v->fpu_dirtied     = 0;
-    v->is_polling      = 0;
     v->is_initialised  = 0;
     v->nmi_pending     = 0;
     v->mce_pending     = 0;
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 3e8956e0e7..6686b2294b 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -545,6 +545,7 @@ out:
 static int evtchn_set_pending(struct vcpu *v, int port)
 {
     struct domain *d = v->domain;
+    int vcpuid;
 
     /*
      * The following bit operations must happen in strict order.
@@ -564,15 +565,19 @@ static int evtchn_set_pending(struct vcpu *v, int port)
     }
 
     /* Check if some VCPU might be polling for this event. */
-    if ( unlikely(d->is_polling) )
+    if ( likely(bitmap_empty(d->poll_mask, MAX_VIRT_CPUS)) )
+        return 0;
+
+    /* Wake any interested (or potentially interested) pollers. */
+    for ( vcpuid = find_first_bit(d->poll_mask, MAX_VIRT_CPUS);
+          vcpuid < MAX_VIRT_CPUS;
+          vcpuid = find_next_bit(d->poll_mask, MAX_VIRT_CPUS, vcpuid+1) )
     {
-        d->is_polling = 0;
-        smp_mb(); /* check vcpu poll-flags /after/ clearing domain poll-flag */
-        for_each_vcpu ( d, v )
+        v = d->vcpu[vcpuid];
+        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
+             test_and_clear_bit(vcpuid, d->poll_mask) )
         {
-            if ( !v->is_polling )
-                continue;
-            v->is_polling = 0;
+            v->poll_evtchn = 0;
             vcpu_unblock(v);
         }
     }
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 7d32c99485..3b015b06ac 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -198,6 +198,27 @@ void vcpu_wake(struct vcpu *v)
     TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
 }
 
+void vcpu_unblock(struct vcpu *v)
+{
+    if ( !test_and_clear_bit(_VPF_blocked, &v->pause_flags) )
+        return;
+
+    /* Polling period ends when a VCPU is unblocked. */
+    if ( unlikely(v->poll_evtchn != 0) )
+    {
+        v->poll_evtchn = 0;
+        /*
+         * We *must* re-clear _VPF_blocked to avoid racing other wakeups of
+         * this VCPU (and it then going back to sleep on poll_mask).
+         * Test-and-clear is idiomatic and ensures clear_bit not reordered.
+         */
+        if ( test_and_clear_bit(v->vcpu_id, v->domain->poll_mask) )
+            clear_bit(_VPF_blocked, &v->pause_flags);
+    }
+
+    vcpu_wake(v);
+}
+
 static void vcpu_migrate(struct vcpu *v)
 {
     unsigned long flags;
@@ -337,7 +358,7 @@ static long do_poll(struct sched_poll *sched_poll)
     struct vcpu   *v = current;
     struct domain *d = v->domain;
     evtchn_port_t  port;
-    long           rc = 0;
+    long           rc;
     unsigned int   i;
 
     /* Fairly arbitrary limit. */
@@ -348,11 +369,24 @@ static long do_poll(struct sched_poll *sched_poll)
         return -EFAULT;
 
     set_bit(_VPF_blocked, &v->pause_flags);
-    v->is_polling = 1;
-    d->is_polling = 1;
+    v->poll_evtchn = -1;
+    set_bit(v->vcpu_id, d->poll_mask);
 
+#ifndef CONFIG_X86 /* set_bit() implies mb() on x86 */
     /* Check for events /after/ setting flags: avoids wakeup waiting race. */
-    smp_wmb();
+    smp_mb();
+
+    /*
+     * Someone may have seen we are blocked but not that we are polling, or
+     * vice versa. We are certainly being woken, so clean up and bail. Beyond
+     * this point others can be guaranteed to clean up for us if they wake us.
+     */
+    rc = 0;
+    if ( (v->poll_evtchn == 0) ||
+         !test_bit(_VPF_blocked, &v->pause_flags) ||
+         !test_bit(v->vcpu_id, d->poll_mask) )
+        goto out;
+#endif
 
     for ( i = 0; i < sched_poll->nr_ports; i++ )
     {
@@ -369,6 +403,9 @@ static long do_poll(struct sched_poll *sched_poll)
             goto out;
     }
 
+    if ( sched_poll->nr_ports == 1 )
+        v->poll_evtchn = port;
+
     if ( sched_poll->timeout != 0 )
         set_timer(&v->poll_timer, sched_poll->timeout);
 
@@ -378,7 +415,8 @@ static long do_poll(struct sched_poll *sched_poll)
     return 0;
 
  out:
-    v->is_polling = 0;
+    v->poll_evtchn = 0;
+    clear_bit(v->vcpu_id, d->poll_mask);
     clear_bit(_VPF_blocked, &v->pause_flags);
     return rc;
 }
@@ -760,11 +798,8 @@ static void poll_timer_fn(void *data)
 {
     struct vcpu *v = data;
 
-    if ( !v->is_polling )
-        return;
-
-    v->is_polling = 0;
-    vcpu_unblock(v);
+    if ( test_and_clear_bit(v->vcpu_id, v->domain->poll_mask) )
+        vcpu_unblock(v);
 }
 
 /* Initialise the data structures. */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index b6a3faabfa..b083b10a73 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -106,8 +106,6 @@ struct vcpu
     bool_t           fpu_initialised;
     /* Has the FPU been used since it was last saved? */
     bool_t           fpu_dirtied;
-    /* Is this VCPU polling any event channels (SCHEDOP_poll)? */
-    bool_t           is_polling;
     /* Initialization completed for this VCPU? */
     bool_t           is_initialised;
     /* Currently running on a CPU? */
@@ -134,6 +132,13 @@ struct vcpu
     /* VCPU affinity is temporarily locked from controller changes? */
     bool_t           affinity_locked;
 
+    /*
+     * > 0: a single port is being polled;
+     * = 0: nothing is being polled (vcpu should be clear in d->poll_mask);
+     * < 0: multiple ports may be being polled.
+     */
+    int              poll_evtchn;
+
     unsigned long    pause_flags;
     atomic_t         pause_count;
 
@@ -209,8 +214,6 @@ struct domain
     struct domain   *target;
     /* Is this guest being debugged by dom0? */
     bool_t           debugger_attached;
-    /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
-    bool_t           is_polling;
     /* Is this guest dying (i.e., a zombie)? */
     enum { DOMDYING_alive, DOMDYING_dying, DOMDYING_dead } is_dying;
     /* Domain is paused by controller software? */
@@ -218,6 +221,9 @@ struct domain
     /* Domain's VCPUs are pinned 1:1 to physical CPUs? */
     bool_t           is_pinned;
 
+    /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
+    DECLARE_BITMAP(poll_mask, MAX_VIRT_CPUS);
+
     /* Guest has shut down (inc. reason code)? */
     spinlock_t       shutdown_lock;
     bool_t           is_shutting_down; /* in process of shutting down? */
@@ -507,6 +513,7 @@ static inline int vcpu_runnable(struct vcpu *v)
              atomic_read(&v->domain->pause_count));
 }
 
+void vcpu_unblock(struct vcpu *v);
 void vcpu_pause(struct vcpu *v);
 void vcpu_pause_nosync(struct vcpu *v);
 void domain_pause(struct domain *d);
@@ -523,12 +530,6 @@ void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
 
-static inline void vcpu_unblock(struct vcpu *v)
-{
-    if ( test_and_clear_bit(_VPF_blocked, &v->pause_flags) )
-        vcpu_wake(v);
-}
-
 #define IS_PRIV(_d) ((_d)->is_privileged)
 #define IS_PRIV_FOR(_d, _t) (IS_PRIV(_d) || ((_d)->target && (_d)->target == (_t)))
-- 
2.30.2
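
P.S. (editorial note, not part of the commit): for readers unfamiliar with
the hypercall being optimised, below is a minimal guest-side sketch of the
single-port case that now takes the v->poll_evtchn fast path. It assumes the
public interface (struct sched_poll, SCHEDOP_poll, set_xen_guest_handle from
xen/include/public/) plus a guest-provided HYPERVISOR_sched_op() wrapper; the
helper name poll_one_port is made up for illustration.

    #include <public/sched.h>   /* struct sched_poll, SCHEDOP_poll */

    static int poll_one_port(evtchn_port_t port)
    {
        struct sched_poll poll = {
            .nr_ports = 1,  /* single port: do_poll() records it in
                             * v->poll_evtchn */
            .timeout  = 0,  /* 0 = wait indefinitely; a non-zero value is
                             * an absolute time, passed to set_timer() */
        };

        set_xen_guest_handle(poll.ports, &port);

        /* Blocks this VCPU until the port is pending (or timeout). */
        return HYPERVISOR_sched_op(SCHEDOP_poll, &poll);
    }

With nr_ports == 1, evtchn_set_pending() wakes only pollers whose recorded
port matches (or that are polling multiple ports, poll_evtchn < 0), rather
than unblocking every polling VCPU in the domain on every event as the old
d->is_polling scheme did.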