xen: arm: arm64: Adding VFP save/restore support.
authorPranavkumar Sawargaonkar <pranavkumar@linaro.org>
Thu, 6 Feb 2014 07:28:42 +0000 (12:58 +0530)
committerIan Campbell <ian.campbell@citrix.com>
Thu, 6 Feb 2014 11:53:56 +0000 (11:53 +0000)
This patch adds VFP save/restore support for arm64 across context switches.

Signed-off-by: Pranavkumar Sawargaonkar <pranavkumar@linaro.org>
Signed-off-by: Anup Patel <anup.patel@linaro.org>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
[ ijc -- dropped now obsolete TODO comments ]

xen/arch/arm/arm64/vfp.c
xen/include/asm-arm/arm64/vfp.h

index 74e6a50579853271f40592406b0cb107cb9a995b..c09cf0cce43cd98df3f183bee028686b8a0fd8fb 100644 (file)
@@ -1,13 +1,60 @@
 #include <xen/sched.h>
 #include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <asm/vfp.h>
 
 void vfp_save_state(struct vcpu *v)
 {
-    /* TODO: implement it */
+    /* Nothing to save if this CPU has no FP/SIMD support. */
+    if ( !cpu_has_fp )
+        return;
+
+    /*
+     * Store all 32 128-bit SIMD&FP registers (q0..q31) into the vcpu's
+     * save area. Each "stp" writes one pair of q registers, so the
+     * offsets advance by 16 bytes per register (two registers per
+     * instruction). The "memory" clobber stops the compiler from
+     * caching fpregs across the asm.
+     */
+    asm volatile("stp q0, q1, [%0, #16 * 0]\n\t"
+                 "stp q2, q3, [%0, #16 * 2]\n\t"
+                 "stp q4, q5, [%0, #16 * 4]\n\t"
+                 "stp q6, q7, [%0, #16 * 6]\n\t"
+                 "stp q8, q9, [%0, #16 * 8]\n\t"
+                 "stp q10, q11, [%0, #16 * 10]\n\t"
+                 "stp q12, q13, [%0, #16 * 12]\n\t"
+                 "stp q14, q15, [%0, #16 * 14]\n\t"
+                 "stp q16, q17, [%0, #16 * 16]\n\t"
+                 "stp q18, q19, [%0, #16 * 18]\n\t"
+                 "stp q20, q21, [%0, #16 * 20]\n\t"
+                 "stp q22, q23, [%0, #16 * 22]\n\t"
+                 "stp q24, q25, [%0, #16 * 24]\n\t"
+                 "stp q26, q27, [%0, #16 * 26]\n\t"
+                 "stp q28, q29, [%0, #16 * 28]\n\t"
+                 "stp q30, q31, [%0, #16 * 30]\n\t"
+                 :: "r" ((char *)(&v->arch.vfp.fpregs)): "memory");
+
+    /* Save the FP control/status registers alongside the data registers. */
+    v->arch.vfp.fpsr = READ_SYSREG32(FPSR);
+    v->arch.vfp.fpcr = READ_SYSREG32(FPCR);
+    /* NOTE(review): FPEXC32_EL2 is the AArch32 FPEXC view; saved
+     * unconditionally here even for 64-bit guests — confirm intended. */
+    v->arch.vfp.fpexc32_el2 = READ_SYSREG32(FPEXC32_EL2);
 }
 
 void vfp_restore_state(struct vcpu *v)
 {
-    /* TODO: implement it */
+    /* Nothing to restore if this CPU has no FP/SIMD support. */
+    if ( !cpu_has_fp )
+        return;
+
+    /*
+     * Reload all 32 128-bit SIMD&FP registers (q0..q31) from the vcpu's
+     * save area, mirroring the layout written by vfp_save_state(): one
+     * "ldp" per pair of q registers, 16 bytes per register.
+     */
+    asm volatile("ldp q0, q1, [%0, #16 * 0]\n\t"
+                 "ldp q2, q3, [%0, #16 * 2]\n\t"
+                 "ldp q4, q5, [%0, #16 * 4]\n\t"
+                 "ldp q6, q7, [%0, #16 * 6]\n\t"
+                 "ldp q8, q9, [%0, #16 * 8]\n\t"
+                 "ldp q10, q11, [%0, #16 * 10]\n\t"
+                 "ldp q12, q13, [%0, #16 * 12]\n\t"
+                 "ldp q14, q15, [%0, #16 * 14]\n\t"
+                 "ldp q16, q17, [%0, #16 * 16]\n\t"
+                 "ldp q18, q19, [%0, #16 * 18]\n\t"
+                 "ldp q20, q21, [%0, #16 * 20]\n\t"
+                 "ldp q22, q23, [%0, #16 * 22]\n\t"
+                 "ldp q24, q25, [%0, #16 * 24]\n\t"
+                 "ldp q26, q27, [%0, #16 * 26]\n\t"
+                 "ldp q28, q29, [%0, #16 * 28]\n\t"
+                 "ldp q30, q31, [%0, #16 * 30]\n\t"
+                 :: "r" ((char *)(&v->arch.vfp.fpregs)): "memory");
+
+    /* Restore the FP control/status registers saved by vfp_save_state(). */
+    WRITE_SYSREG32(v->arch.vfp.fpsr, FPSR);
+    WRITE_SYSREG32(v->arch.vfp.fpcr, FPCR);
+    WRITE_SYSREG32(v->arch.vfp.fpexc32_el2, FPEXC32_EL2);
 }
index 3733d2cfb74f6a56a2e36b9824475f96f3311735..373f156b2e23568a03ed48e5f641f223409c5fbc 100644 (file)
@@ -3,6 +3,10 @@
 
 struct vfp_state
 {
+    /* 32 x 128-bit SIMD&FP registers q0..q31, stored as 64 64-bit words
+     * by the stp/ldp pairs in arm64/vfp.c. */
+    uint64_t fpregs[64];
+    uint32_t fpcr;          /* Floating-point Control Register (FPCR) */
+    /* AArch32 FPEXC as seen from EL2 (FPEXC32_EL2); presumably only
+     * meaningful for 32-bit guests — TODO confirm. */
+    uint32_t fpexc32_el2;
+    uint32_t fpsr;          /* Floating-point Status Register (FPSR) */
 };
 
 #endif /* _ARM_ARM64_VFP_H */