.endm
/*
- * Save state on entry to hypervisor
+ * Save state on entry to hypervisor, restore on exit
*/
.macro entry, hyp, compat
sub sp, sp, #(UREGS_SPSR_el1 - UREGS_LR) /* CPSR, PC, SP, LR */
.endm
+ .macro exit, hyp, compat
+
+ .if \hyp == 0 /* Guest mode */
+
+ bl leave_hypervisor_tail /* Disables interrupts on return */
+
+ .endif
+
+ b return_from_trap
+
+ .endm
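+
+ /*
+  * For illustration, the expansions of the two forms used below
+  * (\compat does not affect the expansion of this macro body):
+  *
+  *   exit hyp=1            =>  b  return_from_trap
+  *
+  *   exit hyp=0, compat=1  =>  bl leave_hypervisor_tail
+  *                             b  return_from_trap
+  *
+  * i.e. only guest exits pass through leave_hypervisor_tail, which
+  * returns with interrupts disabled.
+  */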
+
/*
* Bad Abort numbers
*-----------------
msr daifclr, #2
mov x0, sp
bl do_trap_hypervisor
- b return_to_hypervisor
+ exit hyp=1
hyp_irq:
entry hyp=1
mov x0, sp
bl do_trap_irq
- b return_to_hypervisor
+ exit hyp=1
guest_sync:
entry hyp=0, compat=0
msr daifclr, #2
mov x0, sp
bl do_trap_hypervisor
- b return_to_guest
+ exit hyp=0, compat=0
guest_irq_compat:
entry hyp=0, compat=1
mov x0, sp
bl do_trap_irq
- b return_to_guest
+ exit hyp=0, compat=1
guest_fiq_invalid_compat:
entry hyp=0, compat=1
invalid BAD_FIQ
guest_error_invalid_compat:
entry hyp=0, compat=1
invalid BAD_ERROR
-ENTRY(return_to_new_vcpu)
- ldr x21, [sp, #UREGS_CPSR]
- and x21, x21, #PSR_MODE_MASK
- /* Returning to EL2? */
- cmp x21, #PSR_MODE_EL2t
- ccmp x21, #PSR_MODE_EL2h, #0x4, ne
- b.eq return_to_hypervisor /* Yes */
- /* Fall thru */
-return_to_guest:
- bl leave_hypervisor_tail /* Disables interrupts on return */
- /* Fall thru */
-return_to_hypervisor:
+ENTRY(return_to_new_vcpu32)
+ exit hyp=0, compat=1
+ENTRY(return_to_new_vcpu64)
+ exit hyp=0, compat=0
+
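+ /*
+  * Common tail for both hypervisor and guest exits: interrupts are
+  * masked and the saved ELR/SPSR pair is reloaded ready for the
+  * return to the trapped context.
+  */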
+return_from_trap:
msr daifset, #2 /* Mask interrupts */
ldp x21, x22, [sp, #UREGS_PC] // load ELR, SPSR
if ( is_idle_vcpu(current) )
reset_stack_and_jump(idle_loop);
+ else if ( is_pv32_domain(current->domain) )
+ /* check_wakeup_from_wait(); */
+ reset_stack_and_jump(return_to_new_vcpu32);
else
/* check_wakeup_from_wait(); */
- reset_stack_and_jump(return_to_new_vcpu);
+ reset_stack_and_jump(return_to_new_vcpu64);
+
}
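
/*
 * reset_stack_and_jump() is assumed here to behave as elsewhere in
 * Xen: it resets the stack pointer and branches to the given entry
 * point without returning. A new vCPU therefore makes its first entry
 * into guest context via return_to_new_vcpu32/64 and the common guest
 * exit path above.
 */
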
void context_switch(struct vcpu *prev, struct vcpu *next)