DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
/*
 * NOTE(review): the leading "-" on each line below is a leftover unified-diff
 * removal marker — this chunk is patch residue, not compilable C.  These two
 * static helpers are the PRE-patch continuation routines; the patch deletes
 * them and inlines their bodies at the call site (see the "+" lines further
 * down: reset_stack_and_jump(idle_loop) / reset_stack_and_jump(return_to_new_vcpu)).
 * Note the behavioral rename in the non-idle path: the old helper jumped to
 * return_from_trap, while the replacement jumps to return_to_new_vcpu.
 */
-static void continue_idle_domain(struct vcpu *v)
-{
- reset_stack_and_jump(idle_loop);
-}
-
-static void continue_nonidle_domain(struct vcpu *v)
-{
- /* check_wakeup_from_wait(); */
- reset_stack_and_jump(return_from_trap);
-}
-
/*
 * NOTE(review): this "function" is a fusion of at least two separate diff
 * hunks whose "@@" headers were stripped.  Evidence: the if/else after the
 * infinite for(;;) would be unreachable, and "prev" is not a parameter of
 * idle_loop().  The if/else continuation logic presumably belongs to a
 * separate function that runs after schedule_tail() during a context switch
 * (upstream Xen calls it continue_new_vcpu) — TODO confirm against the
 * original patch.  The "-"/"+" lines show the deleted helper calls being
 * replaced by direct reset_stack_and_jump() invocations.
 */
void idle_loop(void)
{
for ( ; ; )
schedule_tail(prev);
if ( is_idle_vcpu(current) )
- continue_idle_domain(current);
+ reset_stack_and_jump(idle_loop);
else
- continue_nonidle_domain(current);
+ /* check_wakeup_from_wait(); */
+ reset_stack_and_jump(return_to_new_vcpu);
}
/*
 * NOTE(review): only the signature of context_switch() survived extraction —
 * its body was dropped at a hunk boundary.  The DEFINE_TRAP_ENTRY()
 * invocations below are assembler-macro instantiations (presumably trap-entry
 * stubs for IRQ and FIQ); they mark the point where this chunk crosses from
 * the C source file into the assembly source file — two different files were
 * fused into this one chunk.  TODO confirm against the original patch.
 */
void context_switch(struct vcpu *prev, struct vcpu *next)
DEFINE_TRAP_ENTRY(irq)
DEFINE_TRAP_ENTRY(fiq)
/*
 * NOTE(review): ARM assembly hunk, still carrying "-"/"+" diff markers.
 * The patch demotes return_from_trap from an exported ENTRY() to a local
 * label that restores sp from r11 and falls through into the new exported
 * entry point return_to_new_vcpu.  Caution: the cmp result below has no
 * visible conditional branch consuming it before r11 is clobbered by
 * "mov r11, sp" — a branch line (upstream: beq to a hypervisor-return path)
 * was almost certainly dropped at a hunk boundary; do not treat this listing
 * as complete.  The block is also truncated mid-routine: the banked-register
 * restores continue past the end of this chunk.
 */
-ENTRY(return_from_trap)
+return_from_trap:
/* Restore the stack pointer saved in r11 before the trap was taken
 * (assumption — the save site is outside this chunk; verify). */
+ mov sp, r11
+ENTRY(return_to_new_vcpu)
/* Load the saved guest cpsr and isolate its mode field to test whether
 * the trap came from hypervisor mode. */
ldr r11, [sp, #UREGS_cpsr]
and r11, #PSR_MODE_MASK
cmp r11, #PSR_MODE_HYP
/* Save sp in r11 and align sp before calling into C, then restore it
 * afterwards (the "+" restore is what this patch adds). */
mov r11, sp
bic sp, #7 /* Align the stack pointer */
bl leave_hypervisor_tail
+ mov sp, r11
RESTORE_ONE_BANKED(SP_usr)
/* LR_usr is the same physical register as lr and is restored below */
RESTORE_BANKED(svc)