xen: arm: Handle SMC from 64-bit guests
author     Ian Campbell <ian.campbell@citrix.com>
           Mon, 29 Jul 2013 12:21:06 +0000 (13:21 +0100)
committer  Ian Campbell <ian.campbell@citrix.com>
           Mon, 29 Jul 2013 15:54:51 +0000 (16:54 +0100)
As with arm32 guests, handle an SMC issued by a 64-bit guest by injecting
an undefined instruction trap into the guest.
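
As a rough guest-side illustration: a 64-bit guest which issues an SMC
now takes a synchronous exception at its own VBAR_EL1 + 0x200 vector
(current EL, SPx, synchronous), with ESR_EL1 reporting the "Unknown
reason" exception class, just as for a genuinely undefined instruction.
A hypothetical guest handler might observe it like this (the handler
and helper names are illustrative only, not part of Xen or any guest
ABI):

    void guest_sync_handler(void)
    {
        uint64_t esr;

        /* Read the syndrome the hypervisor wrote into ESR_EL1. */
        asm volatile("mrs %0, esr_el1" : "=r" (esr));

        /* EC is ESR_EL1[31:26]; 0x00 means "Unknown reason". */
        if ( ((esr >> 26) & 0x3f) == 0x00 )
            handle_undefined_instruction();  /* guest's own policy */
    }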

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
xen/arch/arm/traps.c
xen/include/asm-arm/processor.h
xen/include/public/arch-arm.h

diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index b4828f307c1a014fd4ccdc0f0b0f55c711abe8bb..1b9209d30d00e632976d7208ab6ce4c75b01c4cc 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -284,25 +284,49 @@ static vaddr_t exception_handler(vaddr_t offset)
  * pipeline adjustments). See TakeUndefInstrException pseudocode in
  * ARM.
  */
-static void inject_undef_exception(struct cpu_user_regs *regs,
-                                   register_t preferred_return)
+static void inject_undef32_exception(struct cpu_user_regs *regs)
 {
     uint32_t spsr = regs->cpsr;
     int is_thumb = (regs->cpsr & PSR_THUMB);
     /* Saved PC points to the instruction past the faulting instruction. */
     uint32_t return_offset = is_thumb ? 2 : 4;
 
+    BUG_ON( !is_pv32_domain(current->domain) );
+
     /* Update processor mode */
     cpsr_switch_mode(regs, PSR_MODE_UND);
 
     /* Update banked registers */
     regs->spsr_und = spsr;
-    regs->lr_und = preferred_return + return_offset;
+    regs->lr_und = regs->pc32 + return_offset;
 
     /* Branch to exception vector */
     regs->pc32 = exception_handler(VECTOR32_UND);
 }
 
+#ifdef CONFIG_ARM_64
+/* Inject an undefined exception into a 64 bit guest */
+static void inject_undef64_exception(struct cpu_user_regs *regs, int instr_len)
+{
+    union hsr esr = {
+        .iss = 0,
+        .len = instr_len,
+        .ec = HSR_EC_UNKNOWN,
+    };
+
+    BUG_ON( is_pv32_domain(current->domain) );
+
+    regs->spsr_el1 = regs->cpsr;
+    regs->elr_el1 = regs->pc;
+
+    regs->cpsr = PSR_MODE_EL1h | PSR_ABT_MASK | PSR_FIQ_MASK | \
+        PSR_IRQ_MASK | PSR_DBG_MASK;
+    regs->pc = READ_SYSREG(VBAR_EL1) + VECTOR64_CURRENT_SPx_SYNC;
+
+    WRITE_SYSREG32(esr.bits, ESR_EL1);
+}
+#endif
+
 struct reg_ctxt {
     /* Guest-side state */
     uint32_t sctlr_el1, tcr_el1;
@@ -1266,11 +1290,8 @@ asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs)
             goto bad_trap;
         do_cp15_64(regs, hsr);
         break;
-    case HSR_EC_SMC:
-        /* PC32 already contains the preferred exception return
-         * address, so no need to adjust here.
-         */
-        inject_undef_exception(regs, regs->pc32);
+    case HSR_EC_SMC32:
+        inject_undef32_exception(regs);
         break;
     case HSR_EC_HVC32:
 #ifndef NDEBUG
@@ -1291,6 +1312,9 @@ asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs)
             return do_trap_psci(regs);
         do_trap_hypercall(regs, &regs->x16, hsr.iss);
         break;
+    case HSR_EC_SMC64:
+        inject_undef64_exception(regs, hsr.len);
+        break;
     case HSR_EC_SYSREG:
         if ( is_pv32_domain(current->domain) )
             goto bad_trap;
diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h
index 960b83e8ad103a763c2dc341430968bfb0328004..948bf2de9ea4c78ef743cf4102e36e04d76507ad 100644
--- a/xen/include/asm-arm/processor.h
+++ b/xen/include/asm-arm/processor.h
@@ -83,6 +83,7 @@
 #define HCR_SWIO        (1<<1) /* Set/Way Invalidation Override */
 #define HCR_VM          (1<<0) /* Virtual MMU Enable */
 
+#define HSR_EC_UNKNOWN              0x00
 #define HSR_EC_WFI_WFE              0x01
 #define HSR_EC_CP15_32              0x03
 #define HSR_EC_CP15_64              0x04
 #define HSR_EC_CP14_64              0x0c
 #define HSR_EC_SVC32                0x11
 #define HSR_EC_HVC32                0x12
-#define HSR_EC_SMC                  0x13
+#define HSR_EC_SMC32                0x13
 #ifdef CONFIG_ARM_64
 #define HSR_EC_HVC64                0x16
+#define HSR_EC_SMC64                0x17
 #define HSR_EC_SYSREG               0x18
 #endif
 #define HSR_EC_INSTR_ABORT_GUEST    0x20
@@ -388,11 +390,21 @@ union hsr {
 #define CNTx_CTL_PENDING  (1u<<2)  /* IRQ pending */
 
 /* Exception Vector offsets */
+/* ... ARM32 */
 #define VECTOR32_RST  0
 #define VECTOR32_UND  4
 #define VECTOR32_SVC  8
 #define VECTOR32_PABT 12
 #define VECTOR32_DABT 16
+/* ... ARM64 */
+#define VECTOR64_CURRENT_SP0_SYNC  0x000
+#define VECTOR64_CURRENT_SP0_IRQ   0x080
+#define VECTOR64_CURRENT_SP0_FIQ   0x100
+#define VECTOR64_CURRENT_SP0_ERROR 0x180
+#define VECTOR64_CURRENT_SPx_SYNC  0x200
+#define VECTOR64_CURRENT_SPx_IRQ   0x280
+#define VECTOR64_CURRENT_SPx_FIQ   0x300
+#define VECTOR64_CURRENT_SPx_ERROR 0x380
 
 #if defined(CONFIG_ARM_32)
 # include <asm/arm32/processor.h>
diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h
index cea12b2ea8691a5f059c2b681534e7c4dbf6dd73..cbd53a9eda987ff60709db8c4dc26846003c97bd 100644
--- a/xen/include/public/arch-arm.h
+++ b/xen/include/public/arch-arm.h
@@ -234,6 +234,9 @@ typedef uint64_t xen_callback_t;
 #define PSR_IRQ_MASK    (1<<7)        /* Interrupt mask */
 #define PSR_ABT_MASK    (1<<8)        /* Asynchronous Abort mask */
 #define PSR_BIG_ENDIAN  (1<<9)        /* Big Endian Mode */
+#ifdef __aarch64__ /* For Aarch64 bit 9 is repurposed. */
+#define PSR_DBG_MASK    (1<<9)
+#endif
 #define PSR_IT_MASK     (0x0600fc00)  /* Thumb If-Then Mask */
 #define PSR_JAZELLE     (1<<24)       /* Jazelle Mode */