xen: arm: refactor 64-bit return from trap path
author Ian Campbell <ian.campbell@citrix.com>
Mon, 29 Jul 2013 12:21:00 +0000 (13:21 +0100)
committer Ian Campbell <ian.campbell@citrix.com>
Mon, 29 Jul 2013 15:54:50 +0000 (16:54 +0100)
Refactor the exit path to use a single "exit" macro, similar to the entry path.

We can also remove the logic at "return_to_new_vcpu" which detects returns to
hypervisor mode -- it seemingly tried to handle hypervisor threads, which are
not something we have, and the idle VCPUs do not take this path. This
simplifies the return_to_new_vcpu code, which we also split into separate
32-bit and 64-bit VCPU paths.
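
For illustration, on the guest paths ("exit hyp=0, compat=0" and
"exit hyp=0, compat=1") the new macro expands to:

        bl      leave_hypervisor_tail /* Disables interrupts on return */
        b       return_from_trap

while on the hypervisor paths ("exit hyp=1") only the branch to
return_from_trap remains; the compat argument is accepted but not yet
referenced by the macro body in this patch.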

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
xen/arch/arm/arm32/entry.S
xen/arch/arm/arm64/entry.S
xen/arch/arm/domain.c

index 6cdf0aab4bef24c352022d853cd29f03b8d45b26..81d5990803871e0dd2ac749bf4874bd70e87e2da 100644
@@ -87,7 +87,7 @@ DEFINE_TRAP_ENTRY_NOIRQ(fiq)
 
 return_from_trap:
         mov sp, r11
-ENTRY(return_to_new_vcpu)
+ENTRY(return_to_new_vcpu32)
         ldr r11, [sp, #UREGS_cpsr]
         and r11, #PSR_MODE_MASK
         cmp r11, #PSR_MODE_HYP
index c0d2bd82c70dd1e0edc08e6a58e380cdcff32576..390a11d36fa53f39ff674754e734de9399cdffe9 100644
@@ -57,7 +57,7 @@ lr      .req    x30             // link register
         .endm
 
 /*
- * Save state on entry to hypervisor
+ * Save state on entry to hypervisor, restore on exit
  */
         .macro  entry, hyp, compat
         sub     sp, sp, #(UREGS_SPSR_el1 - UREGS_LR) /* CPSR, PC, SP, LR */
@@ -96,6 +96,18 @@ lr      .req    x30             // link register
 
         .endm
 
+        .macro  exit, hyp, compat
+
+        .if \hyp == 0         /* Guest mode */
+
+        bl      leave_hypervisor_tail /* Disables interrupts on return */
+
+        .endif
+
+        b       return_from_trap
+
+        .endm
+
 /*
  * Bad Abort numbers
  *-----------------
@@ -133,13 +145,13 @@ hyp_sync:
         msr     daifclr, #2
         mov     x0, sp
         bl      do_trap_hypervisor
-        b       return_to_hypervisor
+        exit    hyp=1
 
 hyp_irq:
         entry   hyp=1
         mov     x0, sp
         bl      do_trap_irq
-        b       return_to_hypervisor
+        exit    hyp=1
 
 guest_sync:
         entry   hyp=0, compat=0
@@ -162,13 +174,13 @@ guest_sync_compat:
         msr     daifclr, #2
         mov     x0, sp
         bl      do_trap_hypervisor
-        b       return_to_guest
+        exit    hyp=0, compat=1
 
 guest_irq_compat:
         entry   hyp=0, compat=1
         mov     x0, sp
         bl      do_trap_irq
-        b       return_to_guest
+        exit    hyp=0, compat=1
 
 guest_fiq_invalid_compat:
         entry   hyp=0, compat=1
@@ -178,18 +190,12 @@ guest_error_invalid_compat:
         entry   hyp=0, compat=1
         invalid BAD_ERROR
 
-ENTRY(return_to_new_vcpu)
-        ldr     x21, [sp, #UREGS_CPSR]
-        and     x21, x21, #PSR_MODE_MASK
-        /* Returning to EL2? */
-        cmp     x21, #PSR_MODE_EL2t
-        ccmp    x21, #PSR_MODE_EL2h, #0x4, ne
-        b.eq    return_to_hypervisor /* Yes */
-        /* Fall thru */
-return_to_guest:
-        bl      leave_hypervisor_tail /* Disables interrupts on return */
-        /* Fall thru */
-return_to_hypervisor:
+ENTRY(return_to_new_vcpu32)
+        exit    hyp=0, compat=1
+ENTRY(return_to_new_vcpu64)
+        exit    hyp=0, compat=0
+
+return_from_trap:
         msr     daifset, #2 /* Mask interrupts */
 
         ldp     x21, x22, [sp, #UREGS_PC]       // load ELR, SPSR
index b4d99f190c0d8b91c5a13b8a17a923ab5fa08f59..4e9cece3806245490cf8a8a79d65410d52aea82c 100644
@@ -250,9 +250,13 @@ static void continue_new_vcpu(struct vcpu *prev)
 
     if ( is_idle_vcpu(current) )
         reset_stack_and_jump(idle_loop);
+    else if ( is_pv32_domain(current->domain) )
+        /* check_wakeup_from_wait(); */
+        reset_stack_and_jump(return_to_new_vcpu32);
     else
         /* check_wakeup_from_wait(); */
-        reset_stack_and_jump(return_to_new_vcpu);
+        reset_stack_and_jump(return_to_new_vcpu64);
+
 }
 
 void context_switch(struct vcpu *prev, struct vcpu *next)