xen: arm: arm64: Fix memory clobbering issues during VFP save restore.
author     Pranavkumar Sawargaonkar <pranavkumar@linaro.org>
           Fri, 7 Feb 2014 12:57:16 +0000 (18:27 +0530)
committer  Ian Campbell <ian.campbell@citrix.com>
           Tue, 11 Feb 2014 12:43:06 +0000 (12:43 +0000)
This patch addresses the memory clobbering issue mentioned by Julien Grall
with my earlier patch -
Commit Id: 712eb2e04da2cbcd9908f74ebd47c6df60d6d12f

Discussion related to this fix -
http://www.gossamer-threads.com/lists/xen/devel/316247
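
For context, a minimal sketch (not part of this patch, illustrative names
only) of the constraint pattern being adopted: naming the register buffer
as a "Q" memory operand tells the compiler that the asm itself writes (or,
for the restore, reads) that object, rather than relying on a blanket
"memory" clobber:

    #include <stdint.h>

    /* Illustrative buffer: 32 Q registers, 128 bits each. */
    static uint64_t fpregs[64];

    static inline void save_q0_q1(void)
    {
        /*
         * "=Q" marks the buffer as written by the asm;
         * "r" passes its address for the store instruction.
         */
        asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"
                     : "=Q" (*fpregs)
                     : "r" (fpregs));
    }

    static inline void restore_q0_q1(void)
    {
        /* For loads, the buffer is an input memory operand instead. */
        asm volatile("ldp q0, q1, [%1, #16 * 0]\n\t"
                     :
                     : "Q" (*fpregs), "r" (fpregs));
    }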

Signed-off-by: Pranavkumar Sawargaonkar <pranavkumar@linaro.org>
Signed-off-by: Anup Patel <anup.patel@linaro.org>
Acked-by: Julien Grall <julien.grall@linaro.org>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
xen/arch/arm/arm64/vfp.c

index c09cf0cce43cd98df3f183bee028686b8a0fd8fb..3cd2b1bac0cad725f7fee5af14ad920766238d93 100644
@@ -8,23 +8,23 @@ void vfp_save_state(struct vcpu *v)
     if ( !cpu_has_fp )
         return;
 
-    asm volatile("stp q0, q1, [%0, #16 * 0]\n\t"
-                 "stp q2, q3, [%0, #16 * 2]\n\t"
-                 "stp q4, q5, [%0, #16 * 4]\n\t"
-                 "stp q6, q7, [%0, #16 * 6]\n\t"
-                 "stp q8, q9, [%0, #16 * 8]\n\t"
-                 "stp q10, q11, [%0, #16 * 10]\n\t"
-                 "stp q12, q13, [%0, #16 * 12]\n\t"
-                 "stp q14, q15, [%0, #16 * 14]\n\t"
-                 "stp q16, q17, [%0, #16 * 16]\n\t"
-                 "stp q18, q19, [%0, #16 * 18]\n\t"
-                 "stp q20, q21, [%0, #16 * 20]\n\t"
-                 "stp q22, q23, [%0, #16 * 22]\n\t"
-                 "stp q24, q25, [%0, #16 * 24]\n\t"
-                 "stp q26, q27, [%0, #16 * 26]\n\t"
-                 "stp q28, q29, [%0, #16 * 28]\n\t"
-                 "stp q30, q31, [%0, #16 * 30]\n\t"
-                 :: "r" ((char *)(&v->arch.vfp.fpregs)): "memory");
+    asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"
+                 "stp q2, q3, [%1, #16 * 2]\n\t"
+                 "stp q4, q5, [%1, #16 * 4]\n\t"
+                 "stp q6, q7, [%1, #16 * 6]\n\t"
+                 "stp q8, q9, [%1, #16 * 8]\n\t"
+                 "stp q10, q11, [%1, #16 * 10]\n\t"
+                 "stp q12, q13, [%1, #16 * 12]\n\t"
+                 "stp q14, q15, [%1, #16 * 14]\n\t"
+                 "stp q16, q17, [%1, #16 * 16]\n\t"
+                 "stp q18, q19, [%1, #16 * 18]\n\t"
+                 "stp q20, q21, [%1, #16 * 20]\n\t"
+                 "stp q22, q23, [%1, #16 * 22]\n\t"
+                 "stp q24, q25, [%1, #16 * 24]\n\t"
+                 "stp q26, q27, [%1, #16 * 26]\n\t"
+                 "stp q28, q29, [%1, #16 * 28]\n\t"
+                 "stp q30, q31, [%1, #16 * 30]\n\t"
+                 : "=Q" (*v->arch.vfp.fpregs) : "r" (v->arch.vfp.fpregs));
 
     v->arch.vfp.fpsr = READ_SYSREG32(FPSR);
     v->arch.vfp.fpcr = READ_SYSREG32(FPCR);
@@ -36,23 +36,23 @@ void vfp_restore_state(struct vcpu *v)
     if ( !cpu_has_fp )
         return;
 
-    asm volatile("ldp q0, q1, [%0, #16 * 0]\n\t"
-                 "ldp q2, q3, [%0, #16 * 2]\n\t"
-                 "ldp q4, q5, [%0, #16 * 4]\n\t"
-                 "ldp q6, q7, [%0, #16 * 6]\n\t"
-                 "ldp q8, q9, [%0, #16 * 8]\n\t"
-                 "ldp q10, q11, [%0, #16 * 10]\n\t"
-                 "ldp q12, q13, [%0, #16 * 12]\n\t"
-                 "ldp q14, q15, [%0, #16 * 14]\n\t"
-                 "ldp q16, q17, [%0, #16 * 16]\n\t"
-                 "ldp q18, q19, [%0, #16 * 18]\n\t"
-                 "ldp q20, q21, [%0, #16 * 20]\n\t"
-                 "ldp q22, q23, [%0, #16 * 22]\n\t"
-                 "ldp q24, q25, [%0, #16 * 24]\n\t"
-                 "ldp q26, q27, [%0, #16 * 26]\n\t"
-                 "ldp q28, q29, [%0, #16 * 28]\n\t"
-                 "ldp q30, q31, [%0, #16 * 30]\n\t"
-                 :: "r" ((char *)(&v->arch.vfp.fpregs)): "memory");
+    asm volatile("ldp q0, q1, [%1, #16 * 0]\n\t"
+                 "ldp q2, q3, [%1, #16 * 2]\n\t"
+                 "ldp q4, q5, [%1, #16 * 4]\n\t"
+                 "ldp q6, q7, [%1, #16 * 6]\n\t"
+                 "ldp q8, q9, [%1, #16 * 8]\n\t"
+                 "ldp q10, q11, [%1, #16 * 10]\n\t"
+                 "ldp q12, q13, [%1, #16 * 12]\n\t"
+                 "ldp q14, q15, [%1, #16 * 14]\n\t"
+                 "ldp q16, q17, [%1, #16 * 16]\n\t"
+                 "ldp q18, q19, [%1, #16 * 18]\n\t"
+                 "ldp q20, q21, [%1, #16 * 20]\n\t"
+                 "ldp q22, q23, [%1, #16 * 22]\n\t"
+                 "ldp q24, q25, [%1, #16 * 24]\n\t"
+                 "ldp q26, q27, [%1, #16 * 26]\n\t"
+                 "ldp q28, q29, [%1, #16 * 28]\n\t"
+                 "ldp q30, q31, [%1, #16 * 30]\n\t"
+                 : : "Q" (*v->arch.vfp.fpregs), "r" (v->arch.vfp.fpregs));
 
     WRITE_SYSREG32(v->arch.vfp.fpsr, FPSR);
     WRITE_SYSREG32(v->arch.vfp.fpcr, FPCR);