When an idle VCPU is running, Xen will never exit the hypervisor mode.
Furthermore, some parts of the VCPU/domain initialization are already skipped
for idle VCPUs to avoid memory consumption.
Currently, each save/restore function checks itself whether the VCPU is
an idle one or not. We can safely skip the context switch in one place
and gain a bit of time when we {,un}schedule an idle VCPU. This is because
the saving part will take care of disabling anything related to the guest
(such as the GICv).
Also replace every check for an idle VCPU in the save/restore functions with
an ASSERT, to catch anyone calling them with an idle VCPU as argument.
Signed-off-by: Julien Grall <julien.grall@linaro.org>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
static void ctxt_switch_from(struct vcpu *p)
{
+ /*
+  * When the idle VCPU is running, Xen will always stay in hypervisor
+  * mode. Therefore we don't need to save the context of an idle VCPU.
+  */
+ if ( is_idle_vcpu(p) )
+ goto end_context;
+
p2m_save_state(p);
/* CP 15 */
gic_save_state(p);
isb();
+
+end_context:
context_saved(p);
}
static void ctxt_switch_to(struct vcpu *n)
{
+ /*
+  * When the idle VCPU is running, Xen will always stay in hypervisor
+  * mode. Therefore we don't need to restore the context of an idle VCPU.
+  */
+ if ( is_idle_vcpu(n) )
+ return;
+
p2m_restore_state(n);
WRITE_SYSREG32(n->domain->arch.vpidr, VPIDR_EL2);
void gic_save_state(struct vcpu *v)
{
ASSERT(!local_irq_is_enabled());
+ /* The context switch is skipped for idle VCPUs, so this should never
+  * be reached with one. */
+ ASSERT(!is_idle_vcpu(v));
/* No need for spinlocks here because interrupts are disabled around
 * this call and it only accesses struct vcpu fields that cannot be
void gic_restore_state(struct vcpu *v)
{
ASSERT(!local_irq_is_enabled());
-
- if ( is_idle_vcpu(v) )
- return;
+ /* The context switch is skipped for idle VCPUs, so this should never
+  * be reached with one. */
+ ASSERT(!is_idle_vcpu(v));
this_cpu(lr_mask) = v->arch.lr_mask;
gic_hw_ops->restore_state(v);
int virt_timer_save(struct vcpu *v)
{
- if ( is_idle_domain(v->domain) )
- return 0;
+ /* The context switch is skipped for idle VCPUs, so this should never
+  * be reached with one. */
+ ASSERT(!is_idle_vcpu(v));
v->arch.virt_timer.ctl = READ_SYSREG32(CNTV_CTL_EL0);
WRITE_SYSREG32(v->arch.virt_timer.ctl & ~CNTx_CTL_ENABLE, CNTV_CTL_EL0);
int virt_timer_restore(struct vcpu *v)
{
- if ( is_idle_domain(v->domain) )
- return 0;
+ /* The context switch is skipped for idle VCPUs, so this should never
+  * be reached with one. */
+ ASSERT(!is_idle_vcpu(v));
stop_timer(&v->arch.virt_timer.timer);
migrate_timer(&v->arch.virt_timer.timer, v->processor);