From 7241fffcd918bce9824b9cca7f90cea1d5689062 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Roger=20Pau=20Monn=C3=A9?=
Date: Fri, 2 Dec 2016 18:07:58 +0100
Subject: [PATCH] x86: allow calling {shadow/hap}_set_allocation with the idle
 domain
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

... and using the "preempted" parameter. Introduce a new helper that can
be used from either hypercall or idle vcpu context (i.e. during Dom0
creation) in order to check if preemption is needed. If such preemption
happens, the caller should then call process_pending_softirqs in order
to drain the pending softirqs, and then call *_set_allocation again to
continue with its execution.

This allows us to call *_set_allocation() when building domain 0.

While there, also document hypercall_preempt_check and add an assert to
local_events_need_delivery to make sure it's not called by the idle
domain, which doesn't receive any events (and hence that
hypercall_preempt_check is not called by the idle domain either).

Signed-off-by: Roger Pau Monné
Acked-by: George Dunlap
Acked-by: Tim Deegan
---
 xen/arch/x86/mm/hap/hap.c       |  2 +-
 xen/arch/x86/mm/shadow/common.c |  2 +-
 xen/include/asm-x86/event.h     |  3 +++
 xen/include/xen/sched.h         | 15 +++++++++++++++
 4 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index f099e9412f..b9faba6834 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -379,7 +379,7 @@ hap_set_allocation(struct domain *d, unsigned int pages, int *preempted)
             break;
 
         /* Check to see if we need to yield and try again */
-        if ( preempted && hypercall_preempt_check() )
+        if ( preempted && general_preempt_check() )
         {
             *preempted = 1;
             return 0;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 756c2760c4..ddbdb735fa 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1681,7 +1681,7 @@ static int sh_set_allocation(struct domain *d,
             break;
 
         /* Check to see if we need to yield and try again */
-        if ( preempted && hypercall_preempt_check() )
+        if ( preempted && general_preempt_check() )
         {
             *preempted = 1;
             return 0;
diff --git a/xen/include/asm-x86/event.h b/xen/include/asm-x86/event.h
index a82062e339..d589d6f86c 100644
--- a/xen/include/asm-x86/event.h
+++ b/xen/include/asm-x86/event.h
@@ -23,6 +23,9 @@ int hvm_local_events_need_delivery(struct vcpu *v);
 static inline int local_events_need_delivery(void)
 {
     struct vcpu *v = current;
+
+    ASSERT(!is_idle_vcpu(v));
+
     return (has_hvm_container_vcpu(v) ? hvm_local_events_need_delivery(v) :
             (vcpu_info(v, evtchn_upcall_pending) &&
              !vcpu_info(v, evtchn_upcall_mask)));
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 1fbda87813..063efe610f 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -708,11 +708,26 @@ unsigned long hypercall_create_continuation(
     unsigned int op, const char *format, ...);
 void hypercall_cancel_continuation(void);
 
+/*
+ * For long-running operations that must be in hypercall context, check
+ * if there is background work to be done that should interrupt this
+ * operation.
+ */
 #define hypercall_preempt_check() (unlikely(    \
         softirq_pending(smp_processor_id()) |   \
         local_events_need_delivery()            \
     ))
 
+/*
+ * For long-running operations that may be in hypercall context or on
+ * the idle vcpu (e.g. during dom0 construction), check if there is
+ * background work to be done that should interrupt this operation.
+ */
+#define general_preempt_check() (unlikely(                          \
+        softirq_pending(smp_processor_id()) ||                      \
+        (!is_idle_vcpu(current) && local_events_need_delivery())    \
+    ))
+
 extern struct domain *domain_list;
 
 /* Caller must hold the domlist_read_lock or domlist_update_lock. */
-- 
2.30.2
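
For illustration, a minimal sketch of the retry loop the commit message
describes, as a dom0-build-time caller running on the idle vcpu might
write it. This is hypothetical code, not part of the patch: it assumes
only the hap_set_allocation() signature visible in the hunk above plus
the existing process_pending_softirqs() helper, takes `d' and `pages' as
given, and elides the paging locking a real caller would need:

    int preempted, rc;

    for ( ; ; )
    {
        preempted = 0;
        rc = hap_set_allocation(d, pages, &preempted);
        if ( !preempted )
            break;          /* rc now holds the final result */

        /*
         * On the idle vcpu there is no guest context to return to, so a
         * hypercall continuation cannot be created; instead drain the
         * pending softirqs by hand and retry the allocation change.
         */
        process_pending_softirqs();
    }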