xen/arm: Add ARCH_WORKAROUND_2 support for guests
author    Julien Grall <julien.grall@arm.com>
          Tue, 12 Jun 2018 11:36:36 +0000 (12:36 +0100)
committer Julien Grall <julien.grall@arm.com>
          Fri, 22 Jun 2018 01:59:42 +0000 (02:59 +0100)
In order to offer ARCH_WORKAROUND_2 support to guests, we need to track the
state of the workaround per-vCPU. The field 'pad' in cpu_info is now
repurposed to store flags easily accessible in assembly.
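
For illustration, the planned arm64 fastpath could then test the flag straight from cpu_info without leaving assembly; a hypothetical fragment (the adr_cpu_info helper and the CPUINFO_flags offset are assumptions, not part of this patch):

    adr_cpu_info x1                      /* x1 = this vCPU's cpu_info */
    ldr     x0, [x1, #CPUINFO_flags]     /* load the flags field */
    /* Nothing to flip if the guest still has the workaround enabled. */
    tbnz    x0, #CPUINFO_WORKAROUND_2_FLAG_SHIFT, skip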

As the hypervisor will always run with the workaround enabled, we may
need to enable (on guest exit) or disable (on guest entry) the
workaround.

A follow-up patch will add a fastpath for the workaround for arm64 guests.

Note that check_workaround_ssbd() is used instead of ssbd_get_state()
because the former is implemented using an alternative. Therefore the
check is resolved at patching time and the code will be shortcut on
unaffected platforms.
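
As a rough sketch of what such an alternatives-based check looks like
(modeled on the CHECK_WORKAROUND_HELPER pattern in
xen/include/asm-arm/cpuerrata.h; the exact definition may differ):

    static inline bool check_workaround_ssbd(void)
    {
        register_t ret;

        /* Patched at boot: the second instruction is installed only on
         * platforms where the ARM_SSBD capability was detected. */
        asm volatile ( ALTERNATIVE("mov %0, #0",
                                   "mov %0, #1",
                                   ARM_SSBD)
                       : "=r" (ret) );

        return unlikely(ret);
    }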

This is part of XSA-263.

Signed-off-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
xen/arch/arm/domain.c
xen/arch/arm/traps.c
xen/arch/arm/vsmc.c
xen/include/asm-arm/current.h

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 5a2a9a6b83b5c0e992a81eb674c0094958d8dc37..4baecc24476025603d4f82b0a78887f812dc6211 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -21,6 +21,7 @@
 #include <xen/wait.h>
 
 #include <asm/alternative.h>
+#include <asm/cpuerrata.h>
 #include <asm/cpufeature.h>
 #include <asm/current.h>
 #include <asm/event.h>
@@ -572,6 +573,13 @@ int vcpu_initialise(struct vcpu *v)
     if ( (rc = vcpu_vtimer_init(v)) != 0 )
         goto fail;
 
+    /*
+     * Workaround 2 (i.e. the SSBD mitigation) is enabled by default if
+     * supported.
+     */
+    if ( get_ssbd_state() == ARM_SSBD_RUNTIME )
+        v->arch.cpu_info->flags |= CPUINFO_WORKAROUND_2_FLAG;
+
     return rc;
 
 fail:
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index d71adfa745a32744190549df709b03c9231208a9..e47ec8aad5e1bb78a86656ed2b06f7056b83ad75 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -2021,10 +2021,23 @@ inject_abt:
         inject_iabt_exception(regs, gva, hsr.len);
 }
 
+static inline bool needs_ssbd_flip(struct vcpu *v)
+{
+    if ( !check_workaround_ssbd() )
+        return false;
+
+    return !(v->arch.cpu_info->flags & CPUINFO_WORKAROUND_2_FLAG) &&
+             cpu_require_ssbd_mitigation();
+}
+
 static void enter_hypervisor_head(struct cpu_user_regs *regs)
 {
     if ( guest_mode(regs) )
     {
+        /* If the guest has disabled the workaround, bring it back on. */
+        if ( needs_ssbd_flip(current) )
+            arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 1, NULL);
+
         /*
          * If we pended a virtual abort, preserve it until it gets cleared.
          * See ARM ARM DDI 0487A.j D1.14.3 (Virtual Interrupts) for details,
@@ -2270,6 +2283,13 @@ void leave_hypervisor_tail(void)
              */
             SYNCHRONIZE_SERROR(SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT);
 
+            /*
+             * The hypervisor runs with the workaround always present.
+             * If the guest wants it disabled, so be it...
+             */
+            if ( needs_ssbd_flip(current) )
+                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 0, NULL);
+
             return;
         }
         local_irq_enable();
diff --git a/xen/arch/arm/vsmc.c b/xen/arch/arm/vsmc.c
index 40a80d5760e84a71101fd5f0de31ce3081d0e0e3..c4ccae603052965aab9408c93411b4e914183c8c 100644
--- a/xen/arch/arm/vsmc.c
+++ b/xen/arch/arm/vsmc.c
@@ -18,6 +18,7 @@
 #include <xen/lib.h>
 #include <xen/types.h>
 #include <public/arch-arm/smccc.h>
+#include <asm/cpuerrata.h>
 #include <asm/cpufeature.h>
 #include <asm/monitor.h>
 #include <asm/regs.h>
@@ -104,6 +105,23 @@ static bool handle_arch(struct cpu_user_regs *regs)
             if ( cpus_have_cap(ARM_HARDEN_BRANCH_PREDICTOR) )
                 ret = 0;
             break;
+        case ARM_SMCCC_ARCH_WORKAROUND_2_FID:
+            switch ( get_ssbd_state() )
+            {
+            case ARM_SSBD_UNKNOWN:
+            case ARM_SSBD_FORCE_DISABLE:
+                break;
+
+            case ARM_SSBD_RUNTIME:
+                ret = ARM_SMCCC_SUCCESS;
+                break;
+
+            case ARM_SSBD_FORCE_ENABLE:
+            case ARM_SSBD_MITIGATED:
+                ret = ARM_SMCCC_NOT_REQUIRED;
+                break;
+            }
+            break;
         }
 
         set_user_reg(regs, 0, ret);
@@ -114,6 +132,25 @@ static bool handle_arch(struct cpu_user_regs *regs)
     case ARM_SMCCC_ARCH_WORKAROUND_1_FID:
         /* No return value */
         return true;
+
+    case ARM_SMCCC_ARCH_WORKAROUND_2_FID:
+    {
+        bool enable = (uint32_t)get_user_reg(regs, 1);
+
+        /*
+         * ARM_SMCCC_ARCH_WORKAROUND_2_FID should only be called when the
+         * mitigation state can be changed at runtime.
+         */
+        if ( unlikely(get_ssbd_state() != ARM_SSBD_RUNTIME) )
+            return true;
+
+        if ( enable )
+            get_cpu_info()->flags |= CPUINFO_WORKAROUND_2_FLAG;
+        else
+            get_cpu_info()->flags &= ~CPUINFO_WORKAROUND_2_FLAG;
+
+        return true;
+    }
     }
 
     return false;
diff --git a/xen/include/asm-arm/current.h b/xen/include/asm-arm/current.h
index 7a0971fdea6a0deb1d2398b0bac74a828d459641..f9819b34fcb76ea50d3e79ad6a0079df49f095aa 100644
--- a/xen/include/asm-arm/current.h
+++ b/xen/include/asm-arm/current.h
@@ -7,6 +7,10 @@
 #include <asm/percpu.h>
 #include <asm/processor.h>
 
+/* Tell whether the guest vCPU enabled Workaround 2 (i.e. Spectre variant 4) */
+#define CPUINFO_WORKAROUND_2_FLAG_SHIFT   0
+#define CPUINFO_WORKAROUND_2_FLAG (_AC(1, U) << CPUINFO_WORKAROUND_2_FLAG_SHIFT)
+
 #ifndef __ASSEMBLY__
 
 struct vcpu;
@@ -21,7 +25,7 @@ DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
 struct cpu_info {
     struct cpu_user_regs guest_cpu_user_regs;
     unsigned long elr;
-    unsigned int pad;
+    uint32_t flags;
 };
 
 static inline struct cpu_info *get_cpu_info(void)
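
For context, a guest that follows the SMCCC 1.1 calling convention would
probe for the workaround before toggling it per vCPU. A minimal guest-side
sketch, assuming Linux-style helpers from <linux/arm-smccc.h> (the helper
and constant names describe the guest environment and are not part of this
patch):

    #include <linux/arm-smccc.h>

    static void ssbd_set_mitigation(bool enable)
    {
        struct arm_smccc_res res;

        /* Probe: does the hypervisor implement ARCH_WORKAROUND_2? */
        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                          ARM_SMCCC_ARCH_WORKAROUND_2, &res);
        if ( (int)res.a0 < 0 )
            return; /* unsupported, or the state cannot be changed */

        /* Toggle the per-vCPU state: a1 == 1 enables, a1 == 0 disables. */
        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, enable, NULL);
    }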