x86/hypercall: Make the HVM hcall_preempted boolean common
author     Andrew Cooper <andrew.cooper3@citrix.com>
Tue, 14 Feb 2017 17:02:04 +0000 (17:02 +0000)
committer  Andrew Cooper <andrew.cooper3@citrix.com>
Thu, 16 Feb 2017 14:15:25 +0000 (14:15 +0000)
HVM guests currently use arch.hvm_vcpu.hcall_preempted to track hypercall
preemption.  Move this boolean to the top level of struct vcpu, making it
common, which will allow it to be reused elsewhere.
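
Concretely, the flag moves out of struct hvm_vcpu and into the common
struct vcpu, as excerpted here from the final sched.h hunk below:

    struct vcpu
    {
        /* ... */
        /* VCPU need affinity restored */
        bool             affinity_broken;

        /* A hypercall has been preempted. */
        bool             hcall_preempted;
        /* ... */
    };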

Alter the PV preemption logic to use this boolean.  This simplifies the code
by removing guest-type-specific knowledge, and removes the risk of accidentally
skipping backwards or forwards multiple times and corrupting %rip.

In pv_hypercall() the old_rip bodge can be removed, and the NDEBUG parameter
clobbering can be keyed on a more obvious condition.
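
For reference, the resulting PV flow, condensed from the hypercall.c hunks
below (argument sanitising, the compat path and the NDEBUG clobbering
omitted):

    void pv_hypercall(struct cpu_user_regs *regs)
    {
        struct vcpu *curr = current;
        unsigned long eax = regs->rax;

        curr->hcall_preempted = false;

        /*
         * Dispatch.  If the hypercall needs preempting,
         * hypercall_create_continuation() (domain.c hunk below) resets the
         * argument registers and sets curr->hcall_preempted.
         */
        regs->rax = pv_hypercall_table[eax].native(regs->rdi, regs->rsi,
                                                   regs->rdx, regs->r10,
                                                   regs->r8, regs->r9);

        /*
         * SYSCALL / INT $0x82 have trap semantics, so %rip points at the
         * following instruction.  Rewind it so the hypercall instruction
         * is re-executed if it was preempted.
         */
        if ( curr->hcall_preempted )
            regs->rip -= 2;
    }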

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
xen/arch/x86/domain.c
xen/arch/x86/hvm/hypercall.c
xen/arch/x86/hypercall.c
xen/include/asm-x86/hvm/vcpu.h
xen/include/xen/sched.h

index 3c9319575746b1c1f450952c79e8eca15f983755..3fb1bdcec8658172122795c2f17a2fd296118623 100644 (file)
@@ -2197,20 +2197,12 @@ void sync_vcpu_execstate(struct vcpu *v)
 
 void hypercall_cancel_continuation(void)
 {
-    struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct mc_state *mcs = &current->mc_state;
 
     if ( mcs->flags & MCSF_in_multicall )
-    {
         __clear_bit(_MCSF_call_preempted, &mcs->flags);
-    }
     else
-    {
-        if ( is_pv_vcpu(current) )
-            regs->rip += 2; /* skip re-execute 'syscall' / 'int $xx' */
-        else
-            current->arch.hvm_vcpu.hcall_preempted = 0;
-    }
+        current->hcall_preempted = false;
 }
 
 unsigned long hypercall_create_continuation(
@@ -2238,11 +2230,7 @@ unsigned long hypercall_create_continuation(
 
         regs->rax = op;
 
-        /* Ensure the hypercall trap instruction is re-executed. */
-        if ( is_pv_vcpu(curr) )
-            regs->rip -= 2;  /* re-execute 'syscall' / 'int $xx' */
-        else
-            curr->arch.hvm_vcpu.hcall_preempted = 1;
+        curr->hcall_preempted = true;
 
         if ( is_pv_vcpu(curr) ?
              !is_pv_32bit_vcpu(curr) :
index 5dbd54adafc626f7f05e6eb8f5a64412274506ae..fe7802b1074a6a77f3aa2ccf74a39e7d0b67cfe2 100644 (file)
@@ -176,7 +176,7 @@ int hvm_hypercall(struct cpu_user_regs *regs)
         return HVM_HCALL_completed;
     }
 
-    curr->arch.hvm_vcpu.hcall_preempted = 0;
+    curr->hcall_preempted = false;
 
     if ( mode == 8 )
     {
@@ -210,7 +210,7 @@ int hvm_hypercall(struct cpu_user_regs *regs)
         curr->arch.hvm_vcpu.hcall_64bit = 0;
 
 #ifndef NDEBUG
-        if ( !curr->arch.hvm_vcpu.hcall_preempted )
+        if ( !curr->hcall_preempted )
         {
             /* Deliberately corrupt parameter regs used by this hypercall. */
             switch ( hypercall_args_table[eax].native )
@@ -254,7 +254,7 @@ int hvm_hypercall(struct cpu_user_regs *regs)
                                                     ebp);
 
 #ifndef NDEBUG
-        if ( !curr->arch.hvm_vcpu.hcall_preempted )
+        if ( !curr->hcall_preempted )
         {
             /* Deliberately corrupt parameter regs used by this hypercall. */
             switch ( hypercall_args_table[eax].compat )
@@ -272,7 +272,7 @@ int hvm_hypercall(struct cpu_user_regs *regs)
 
     HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%lu -> %lx", eax, regs->rax);
 
-    if ( curr->arch.hvm_vcpu.hcall_preempted )
+    if ( curr->hcall_preempted )
         return HVM_HCALL_preempted;
 
     if ( unlikely(currd->arch.hvm_domain.qemu_mapcache_invalidate) &&
index 8dd19de8c80be7563ddec2cedc516f82a9a7ee76..945afa0e0142a53e893ec1f18551b91352fcf386 100644 (file)
@@ -141,9 +141,6 @@ static const hypercall_table_t pv_hypercall_table[] = {
 void pv_hypercall(struct cpu_user_regs *regs)
 {
     struct vcpu *curr = current;
-#ifndef NDEBUG
-    unsigned long old_rip = regs->rip;
-#endif
     unsigned long eax;
 
     ASSERT(guest_kernel_mode(curr, regs));
@@ -160,6 +157,8 @@ void pv_hypercall(struct cpu_user_regs *regs)
         return;
     }
 
+    curr->hcall_preempted = false;
+
     if ( !is_pv_32bit_vcpu(curr) )
     {
         unsigned long rdi = regs->rdi;
@@ -191,7 +190,7 @@ void pv_hypercall(struct cpu_user_regs *regs)
         regs->rax = pv_hypercall_table[eax].native(rdi, rsi, rdx, r10, r8, r9);
 
 #ifndef NDEBUG
-        if ( regs->rip == old_rip )
+        if ( !curr->hcall_preempted )
         {
             /* Deliberately corrupt parameter regs used by this hypercall. */
             switch ( hypercall_args_table[eax].native )
@@ -238,7 +237,7 @@ void pv_hypercall(struct cpu_user_regs *regs)
         regs->_eax = pv_hypercall_table[eax].compat(ebx, ecx, edx, esi, edi, ebp);
 
 #ifndef NDEBUG
-        if ( regs->rip == old_rip )
+        if ( !curr->hcall_preempted )
         {
             /* Deliberately corrupt parameter regs used by this hypercall. */
             switch ( hypercall_args_table[eax].compat )
@@ -254,6 +253,14 @@ void pv_hypercall(struct cpu_user_regs *regs)
 #endif
     }
 
+    /*
+     * PV guests use SYSCALL or INT $0x82 to make a hypercall, both of which
+     * have trap semantics.  If the hypercall has been preempted, rewind the
+     * instruction pointer to reexecute the instruction.
+     */
+    if ( curr->hcall_preempted )
+        regs->rip -= 2;
+
     perfc_incr(hypercalls);
 }
 
index e5eeb5ff5d0a2cbb9e06930f329f2dc8b57a255b..6d5553d4dbb3c4e7cceef683ebc97f83255fd638 100644 (file)
@@ -166,7 +166,6 @@ struct hvm_vcpu {
     bool                debug_state_latch;
     bool                single_step;
 
-    bool                hcall_preempted;
     bool                hcall_64bit;
 
     struct hvm_vcpu_asid n1asid;
index 32893de22e46ec4c1e920c8f74b7d80f2c48c7f6..1b1c26abeb63d1157f5312f1c9d8c9f11b8c6c5e 100644 (file)
@@ -203,6 +203,9 @@ struct vcpu
     /* VCPU need affinity restored */
     bool             affinity_broken;
 
+    /* A hypercall has been preempted. */
+    bool             hcall_preempted;
+
 
     /*
      * > 0: a single port is being polled;