{
ASSERT(local_irq_is_enabled());
ASSERT(prev != next);
- ASSERT(cpumask_empty(next->vcpu_dirty_cpumask));
+ ASSERT(!vcpu_cpu_dirty(next));
if ( prev != next )
update_runstate_area(prev);
ASSERT(is_idle_vcpu(v));
/* TODO
cpumask_set_cpu(v->processor, v->domain->domain_dirty_cpumask);
- cpumask_set_cpu(v->processor, v->vcpu_dirty_cpumask);
+ v->dirty_cpu = v->processor;
*/
reset_stack_and_jump(idle_loop);
ASSERT(is_idle_vcpu(v));
cpumask_set_cpu(v->processor, v->domain->domain_dirty_cpumask);
- cpumask_set_cpu(v->processor, v->vcpu_dirty_cpumask);
+ v->dirty_cpu = v->processor;
reset_stack_and_jump(idle_loop);
}
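The conversion rests on one fact: a vCPU's lazily saved register state is held by at most one physical CPU at a time (the old context_switch() code asserted cpumask_weight(&dirty_mask) <= 1 before flushing), so the per-vCPU cpumask collapses into a single CPU number plus an all-ones sentinel. As an illustrative roadmap for the hunks below, not patch content, the recurring substitutions are:

/*
 * Old per-vCPU mask operation                       New dirty_cpu equivalent
 *
 * cpumask_set_cpu(cpu, v->vcpu_dirty_cpumask)    -> v->dirty_cpu = cpu;
 * cpumask_clear_cpu(cpu, v->vcpu_dirty_cpumask)  -> v->dirty_cpu = VCPU_CPU_CLEAN;
 * cpumask_empty(v->vcpu_dirty_cpumask)           -> !vcpu_cpu_dirty(v)
 * cpumask_test_cpu(cpu, v->vcpu_dirty_cpumask)   -> v->dirty_cpu == cpu
 * flush_mask(v->vcpu_dirty_cpumask, flags)       -> if ( vcpu_cpu_dirty(v) )
 *                                                       flush_mask(cpumask_of(v->dirty_cpu), flags);
 */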
struct desc_ptr gdt_desc;
ASSERT(p != n);
- ASSERT(cpumask_empty(n->vcpu_dirty_cpumask));
+ ASSERT(!vcpu_cpu_dirty(n));
if ( !is_idle_domain(pd) )
{
*/
if ( pd != nd )
cpumask_set_cpu(cpu, nd->domain_dirty_cpumask);
- cpumask_set_cpu(cpu, n->vcpu_dirty_cpumask);
+ n->dirty_cpu = cpu;
if ( !is_idle_domain(nd) )
{
if ( pd != nd )
cpumask_clear_cpu(cpu, pd->domain_dirty_cpumask);
- cpumask_clear_cpu(cpu, p->vcpu_dirty_cpumask);
+ p->dirty_cpu = VCPU_CPU_CLEAN;
per_cpu(curr_vcpu, cpu) = n;
}
{
unsigned int cpu = smp_processor_id();
const struct domain *prevd = prev->domain, *nextd = next->domain;
- cpumask_t dirty_mask;
+ unsigned int dirty_cpu = next->dirty_cpu;
ASSERT(local_irq_is_enabled());
get_cpu_info()->xen_cr3 = 0;
- cpumask_copy(&dirty_mask, next->vcpu_dirty_cpumask);
- /* Allow at most one CPU at a time to be dirty. */
- ASSERT(cpumask_weight(&dirty_mask) <= 1);
- if ( unlikely(!cpumask_test_cpu(cpu, &dirty_mask) &&
- !cpumask_empty(&dirty_mask)) )
+ if ( unlikely(dirty_cpu != cpu) && dirty_cpu != VCPU_CPU_CLEAN )
{
- /* Other cpus call __sync_local_execstate from flush ipi handler. */
- flush_mask(&dirty_mask, FLUSH_TLB | FLUSH_VCPU_STATE);
+ /* Remote CPU calls __sync_local_execstate() from flush IPI handler. */
+ flush_mask(cpumask_of(dirty_cpu), FLUSH_TLB | FLUSH_VCPU_STATE);
}
if ( prev != next )
void sync_vcpu_execstate(struct vcpu *v)
{
- if ( cpumask_test_cpu(smp_processor_id(), v->vcpu_dirty_cpumask) )
+ if ( v->dirty_cpu == smp_processor_id() )
sync_local_execstate();
- /* Other cpus call __sync_local_execstate from flush ipi handler. */
- flush_mask(v->vcpu_dirty_cpumask, FLUSH_TLB | FLUSH_VCPU_STATE);
+ if ( vcpu_cpu_dirty(v) )
+ {
+ /* Remote CPU calls __sync_local_execstate() from flush IPI handler. */
+ flush_mask(cpumask_of(v->dirty_cpu), FLUSH_TLB | FLUSH_VCPU_STATE);
+ }
}
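Both flush sites above share one caller pattern: read dirty_cpu, bail out on the VCPU_CPU_CLEAN sentinel, and only then hand cpumask_of() (a const, pre-built single-bit mask, so the on-stack cpumask_t copy goes away) to flush_mask(). A minimal sketch of that pattern; example_flush_vcpu_state() is a hypothetical helper for illustration only, not part of the patch:

static void example_flush_vcpu_state(const struct vcpu *v)
{
    unsigned int dirty_cpu = v->dirty_cpu;  /* read the field once */

    if ( dirty_cpu == VCPU_CPU_CLEAN )
        return;                             /* no CPU holds stale state */

    /* The named CPU syncs via __sync_local_execstate() in its IPI handler. */
    flush_mask(cpumask_of(dirty_cpu), FLUSH_TLB | FLUSH_VCPU_STATE);
}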
static int relinquish_memory(
for_each_vcpu ( pg_owner, v )
{
- if ( pv_destroy_ldt(v) )
- flush_tlb_mask(v->vcpu_dirty_cpumask);
+ if ( pv_destroy_ldt(v) && vcpu_cpu_dirty(v) )
+ flush_tlb_mask(cpumask_of(v->dirty_cpu));
}
}
put_page(page);
vcpu_id += vcpu_bias;
if ( (vcpu_id >= d->max_vcpus) )
return 0;
- if ( ((v = d->vcpu[vcpu_id]) != NULL) )
- cpumask_or(pmask, pmask, v->vcpu_dirty_cpumask);
+ if ( ((v = d->vcpu[vcpu_id]) != NULL) && vcpu_cpu_dirty(v) )
+ __cpumask_set_cpu(v->dirty_cpu, pmask);
}
}
}
v->domain = d;
v->vcpu_id = vcpu_id;
+ v->dirty_cpu = VCPU_CPU_CLEAN;
spin_lock_init(&v->virq_lock);
if ( !zalloc_cpumask_var(&v->cpu_hard_affinity) ||
!zalloc_cpumask_var(&v->cpu_hard_affinity_tmp) ||
!zalloc_cpumask_var(&v->cpu_hard_affinity_saved) ||
- !zalloc_cpumask_var(&v->cpu_soft_affinity) ||
- !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
+ !zalloc_cpumask_var(&v->cpu_soft_affinity) )
goto fail_free;
if ( is_idle_domain(d) )
free_cpumask_var(v->cpu_hard_affinity_tmp);
free_cpumask_var(v->cpu_hard_affinity_saved);
free_cpumask_var(v->cpu_soft_affinity);
- free_cpumask_var(v->vcpu_dirty_cpumask);
free_vcpu_struct(v);
return NULL;
}
free_cpumask_var(v->cpu_hard_affinity_tmp);
free_cpumask_var(v->cpu_hard_affinity_saved);
free_cpumask_var(v->cpu_soft_affinity);
- free_cpumask_var(v->vcpu_dirty_cpumask);
free_vcpu_struct(v);
}
v->is_running ? 'T':'F', v->poll_evtchn,
vcpu_info(v, evtchn_upcall_pending),
!vcpu_event_delivery_is_enabled(v));
- cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
- printk("dirty_cpus=%s\n", tmpstr);
+ if ( vcpu_cpu_dirty(v) )
+ printk("dirty_cpu=%u", v->dirty_cpu);
+ printk("\n");
cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_hard_affinity);
printk(" cpu_hard_affinity=%s ", tmpstr);
cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_soft_affinity);
bool hcall_compat;
#endif
+ /* The CPU, if any, which is holding onto this VCPU's state. */
+#define VCPU_CPU_CLEAN (~0u)
+ unsigned int dirty_cpu;
/*
* > 0: a single port is being polled;
/* Bitmask of CPUs on which this VCPU prefers to run. */
cpumask_var_t cpu_soft_affinity;
- /* Bitmask of CPUs which are holding onto this VCPU's state. */
- cpumask_var_t vcpu_dirty_cpumask;
-
/* Tasklet for continue_hypercall_on_cpu(). */
struct tasklet continue_hypercall_tasklet;
atomic_read(&v->domain->pause_count));
}
+static inline bool vcpu_cpu_dirty(const struct vcpu *v)
+{
+ BUILD_BUG_ON(NR_CPUS >= VCPU_CPU_CLEAN);
+ return v->dirty_cpu != VCPU_CPU_CLEAN;
+}
+
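Together with the __context_switch() hunk, this gives dirty_cpu a simple lifecycle: it names the CPU from the moment the vCPU's state is loaded there until that state is saved back, when it returns to VCPU_CPU_CLEAN. A compressed, hypothetical illustration of that lifecycle (not patch content; it ignores locking and the real switch sequence):

static void example_lazy_switch(struct vcpu *p, struct vcpu *n, unsigned int cpu)
{
    /* Incoming vCPU: its state now lives on, and only on, this CPU. */
    n->dirty_cpu = cpu;
    ASSERT(vcpu_cpu_dirty(n));

    /* Outgoing vCPU: once its registers are saved back, no CPU holds them. */
    p->dirty_cpu = VCPU_CPU_CLEAN;
    ASSERT(!vcpu_cpu_dirty(p));
}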
void vcpu_block(void);
void vcpu_unblock(struct vcpu *v);
void vcpu_pause(struct vcpu *v);