From: Keir Fraser
Date: Wed, 17 Sep 2008 13:13:10 +0000 (+0100)
Subject: x86: Allow continue_hypercall_on_cpu() to be called from within an
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~14101^2~58
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=ff965b7ec9d8231cea61ca33a85cab8ffe318e4f;p=xen.git

x86: Allow continue_hypercall_on_cpu() to be called from within an
existing continuation handler.

This fix is needed for the new method of microcode re-programming.

Signed-off-by: Keir Fraser
---

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 31a100f81c..68205b29b9 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1356,6 +1356,7 @@ struct migrate_info {
     void *data;
     void (*saved_schedule_tail)(struct vcpu *);
     cpumask_t saved_affinity;
+    unsigned int nest;
 };
 
 static void continue_hypercall_on_cpu_helper(struct vcpu *v)
@@ -1363,48 +1364,64 @@ static void continue_hypercall_on_cpu_helper(struct vcpu *v)
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct migrate_info *info = v->arch.continue_info;
     cpumask_t mask = info->saved_affinity;
+    void (*saved_schedule_tail)(struct vcpu *) = info->saved_schedule_tail;
 
     regs->eax = info->func(info->data);
 
-    v->arch.schedule_tail = info->saved_schedule_tail;
-    v->arch.continue_info = NULL;
-
-    xfree(info);
+    if ( info->nest-- == 0 )
+    {
+        xfree(info);
+        v->arch.schedule_tail = saved_schedule_tail;
+        v->arch.continue_info = NULL;
+        vcpu_unlock_affinity(v, &mask);
+    }
 
-    vcpu_unlock_affinity(v, &mask);
-    schedule_tail(v);
+    (*saved_schedule_tail)(v);
 }
 
 int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data)
 {
     struct vcpu *v = current;
     struct migrate_info *info;
+    cpumask_t mask = cpumask_of_cpu(cpu);
     int rc;
 
     if ( cpu == smp_processor_id() )
         return func(data);
 
-    info = xmalloc(struct migrate_info);
+    info = v->arch.continue_info;
     if ( info == NULL )
-        return -ENOMEM;
+    {
+        info = xmalloc(struct migrate_info);
+        if ( info == NULL )
+            return -ENOMEM;
 
-    info->func = func;
-    info->data = data;
-    info->saved_schedule_tail = v->arch.schedule_tail;
-    info->saved_affinity = cpumask_of_cpu(cpu);
+        rc = vcpu_lock_affinity(v, &mask);
+        if ( rc )
+        {
+            xfree(info);
+            return rc;
+        }
 
-    v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
-    v->arch.continue_info = info;
+        info->saved_schedule_tail = v->arch.schedule_tail;
+        info->saved_affinity = mask;
+        info->nest = 0;
 
-    rc = vcpu_lock_affinity(v, &info->saved_affinity);
-    if ( rc )
+        v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
+        v->arch.continue_info = info;
+    }
+    else
     {
-        v->arch.schedule_tail = info->saved_schedule_tail;
-        v->arch.continue_info = NULL;
-        xfree(info);
-        return rc;
+        BUG_ON(info->nest != 0);
+        rc = vcpu_locked_change_affinity(v, &mask);
+        if ( rc )
+            return rc;
+        info->nest++;
     }
 
+    info->func = func;
+    info->data = data;
+
     /* Dummy return value will be overwritten by new schedule_tail. */
     BUG_ON(!test_bit(SCHEDULE_SOFTIRQ, &softirq_pending(smp_processor_id())));
     return 0;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 9083d7d786..12882d0204 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -380,6 +380,11 @@ int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity)
     return __vcpu_set_affinity(v, affinity, 0, 1);
 }
 
+int vcpu_locked_change_affinity(struct vcpu *v, cpumask_t *affinity)
+{
+    return __vcpu_set_affinity(v, affinity, 1, 1);
+}
+
 void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity)
 {
     cpumask_t online_affinity;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 1aa3a0edf6..273126a949 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -527,6 +527,7 @@ void vcpu_force_reschedule(struct vcpu *v);
 void cpu_disable_scheduler(void);
 int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);
 int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity);
+int vcpu_locked_change_affinity(struct vcpu *v, cpumask_t *affinity);
 void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity);
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);