x86: use only a single branch for upcall-pending exit path checks
Author: Jan Beulich <jbeulich@suse.com>
Wed, 12 Sep 2012 08:20:18 +0000 (10:20 +0200)
Committer: Jan Beulich <jbeulich@suse.com>
Wed, 12 Sep 2012 08:20:18 +0000 (10:20 +0200)
This utilizes the fact that the two bytes of interest are adjacent to
one another and that the resulting 16-bit values of interest are within
a contiguous range of numbers.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
xen/arch/x86/x86_32/entry.S
xen/arch/x86/x86_64/compat/entry.S
xen/arch/x86/x86_64/entry.S

index 298267931f4447605e7ef952dbbf2f007e871d95..7404eedf542b2e6d918e771d5b87c036bd3ddb03 100644 (file)
@@ -219,10 +219,10 @@ test_all_events:
         jnz  process_nmi
 test_guest_events:
         movl VCPU_vcpu_info(%ebx),%eax
-        testb $0xFF,VCPUINFO_upcall_mask(%eax)
-        jnz  restore_all_guest
-        testb $0xFF,VCPUINFO_upcall_pending(%eax)
-        jz   restore_all_guest
+        movzwl VCPUINFO_upcall_pending(%eax),%eax
+        decl %eax
+        cmpl $0xfe,%eax
+        ja   restore_all_guest
 /*process_guest_events:*/
         sti
         leal VCPU_trap_bounce(%ebx),%edx
index f49ff2d72b42002a5bdce76f3c2d2761cbeb1c1e..2f606ab92545c42c99085a0f7f0e956e6b0a8c94 100644 (file)
@@ -108,10 +108,10 @@ ENTRY(compat_test_all_events)
         jnz   compat_process_nmi
 compat_test_guest_events:
         movq  VCPU_vcpu_info(%rbx),%rax
-        testb $0xFF,COMPAT_VCPUINFO_upcall_mask(%rax)
-        jnz   compat_restore_all_guest
-        testb $0xFF,COMPAT_VCPUINFO_upcall_pending(%rax)
-        jz    compat_restore_all_guest
+        movzwl COMPAT_VCPUINFO_upcall_pending(%rax),%eax
+        decl  %eax
+        cmpl  $0xfe,%eax
+        ja    compat_restore_all_guest
 /*compat_process_guest_events:*/
         sti
         leaq  VCPU_trap_bounce(%rbx),%rdx
index 997bc94c4e47930fd78361e1954bee2787946b3f..8156827d41927b4e7f21821da161dc98b6fed902 100644 (file)
@@ -199,8 +199,8 @@ test_all_events:
         movl  VCPU_processor(%rbx),%eax
         shl   $IRQSTAT_shift,%rax
         leaq  irq_stat(%rip),%rcx
-        testl $~0,(%rcx,%rax,1)
-        jnz   process_softirqs
+        cmpl  $0,(%rcx,%rax,1)
+        jne   process_softirqs
         testb $1,VCPU_mce_pending(%rbx)
         jnz   process_mce
 .Ltest_guest_nmi:
@@ -208,10 +208,10 @@ test_all_events:
         jnz   process_nmi
 test_guest_events:
         movq  VCPU_vcpu_info(%rbx),%rax
-        testb $0xFF,VCPUINFO_upcall_mask(%rax)
-        jnz   restore_all_guest
-        testb $0xFF,VCPUINFO_upcall_pending(%rax)
-        jz    restore_all_guest
+        movzwl VCPUINFO_upcall_pending(%rax),%eax
+        decl  %eax
+        cmpl  $0xfe,%eax
+        ja    restore_all_guest
 /*process_guest_events:*/
         sti
         leaq  VCPU_trap_bounce(%rbx),%rdx