[XEN] Replace direct common-code access of evtchn_upcall_mask
authorkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Sat, 10 Jun 2006 10:07:11 +0000 (11:07 +0100)
committerkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Sat, 10 Jun 2006 10:07:11 +0000 (11:07 +0100)
with local_event_delivery_* accessors.
Notes:
 1. Still some (read-only, debug) use in keyhandler.c
 2. Still accesses through current->vcpu_info.
Both above may need to be compiled only for architectures
that use event channels.

Signed-off-by: Keir Fraser <keir@xensource.com>
xen/common/event_channel.c
xen/common/schedule.c
xen/include/asm-ia64/event.h
xen/include/asm-ia64/vmx_vcpu.h
xen/include/asm-x86/event.h
xen/include/xen/event.h
xen/include/xen/sched.h

index 6218773a1e276cd759b940b5007041d4cc71055e..5437496622374db73b264b3239692ca2fec7ad77 100644 (file)
@@ -499,7 +499,7 @@ void evtchn_set_pending(struct vcpu *v, int port)
         evtchn_notify(v);
     }
     else if ( unlikely(test_bit(_VCPUF_blocked, &v->vcpu_flags) &&
-                       v->vcpu_info->evtchn_upcall_mask) )
+                       !local_event_delivery_is_enabled()) )
     {
         /*
          * Blocked and masked will usually mean that the VCPU executed 
index 32aebd3b83534779f5881cfc27aff8c3d6fd41a7..670ebe8fa16cb1d42bbf907083c61a799ce9d6a8 100644 (file)
@@ -199,11 +199,11 @@ static long do_block(void)
 {
     struct vcpu *v = current;
 
-    v->vcpu_info->evtchn_upcall_mask = 0;
+    local_event_delivery_enable();
     set_bit(_VCPUF_blocked, &v->vcpu_flags);
 
     /* Check for events /after/ blocking: avoids wakeup waiting race. */
-    if ( event_pending(v) )
+    if ( local_events_need_delivery() )
     {
         clear_bit(_VCPUF_blocked, &v->vcpu_flags);
     }
@@ -230,8 +230,8 @@ static long do_poll(struct sched_poll *sched_poll)
     if ( !guest_handle_okay(sched_poll->ports, sched_poll->nr_ports) )
         return -EFAULT;
 
-    /* Ensure that upcalls are disabled: tested by evtchn_set_pending(). */
-    if ( !v->vcpu_info->evtchn_upcall_mask )
+    /* Ensure that events are disabled: tested by evtchn_set_pending(). */
+    if ( local_event_delivery_is_enabled() )
         return -EINVAL;
 
     set_bit(_VCPUF_blocked, &v->vcpu_flags);
@@ -248,7 +248,7 @@ static long do_poll(struct sched_poll *sched_poll)
             goto out;
 
         rc = 0;
-        if ( evtchn_pending(v->domain, port) )
+        if ( test_bit(port, v->domain->shared_info->evtchn_pending) )
             goto out;
     }
 
index 2a1d55cec5b4feccb39b3c19de8a276988ff185e..28dde7fe508d9636a40c7fdf661a61fea5e35716 100644 (file)
@@ -37,6 +37,26 @@ static inline void evtchn_notify(struct vcpu *v)
     (!!(v)->vcpu_info->evtchn_upcall_pending &  \
       !(v)->vcpu_info->evtchn_upcall_mask)
 
+static inline int local_events_need_delivery(void)
+{
+    return event_pending(current);
+}
+
+static inline int local_event_delivery_is_enabled(void)
+{
+    return !current->vcpu_info->evtchn_upcall_mask;
+}
+
+static inline void local_event_delivery_disable(void)
+{
+    current->vcpu_info->evtchn_upcall_mask = 1;
+}
+
+static inline void local_event_delivery_enable(void)
+{
+    current->vcpu_info->evtchn_upcall_mask = 0;
+}
+
 static inline int arch_virq_is_global(int virq)
 {
     int rc;
index bbbc6536cbe6149a8b3a410f6e3c703f067e6cd1..90eb33fd15265bb2c1b99dbabd4ba6ea204a2a8f 100644 (file)
@@ -488,6 +488,4 @@ vcpu_get_vhpt(VCPU *vcpu)
     return &vcpu->arch.vhpt;
 }
 
-#define check_work_pending(v)  \
-    (event_pending((v)) || ((v)->arch.irq_new_pending))
 #endif
index b51d1acafacaa289b9bebc6a09e070228e8021ed..c5b3c4526e7817a489082849a060097c7cb66b73 100644 (file)
@@ -26,10 +26,28 @@ static inline void evtchn_notify(struct vcpu *v)
         smp_send_event_check_cpu(v->processor);
 }
 
-/* Note: Bitwise operations result in fast code with no branches. */
-#define event_pending(v)                        \
-    (!!(v)->vcpu_info->evtchn_upcall_pending &  \
-      !(v)->vcpu_info->evtchn_upcall_mask)
+static inline int local_events_need_delivery(void)
+{
+    struct vcpu *v = current;
+    /* Note: Bitwise operations result in fast code with no branches. */
+    return (!!v->vcpu_info->evtchn_upcall_pending &
+             !v->vcpu_info->evtchn_upcall_mask);
+}
+
+static inline int local_event_delivery_is_enabled(void)
+{
+    return !current->vcpu_info->evtchn_upcall_mask;
+}
+
+static inline void local_event_delivery_disable(void)
+{
+    current->vcpu_info->evtchn_upcall_mask = 1;
+}
+
+static inline void local_event_delivery_enable(void)
+{
+    current->vcpu_info->evtchn_upcall_mask = 0;
+}
 
 /* No arch specific virq definition now. Default to global. */
 static inline int arch_virq_is_global(int virq)
index f7dee3b75d3b2000ea3ddc5a3a782cc6a93135df..7b0d428b67bd96d41a0de44ebcfd5634095b571c 100644 (file)
@@ -38,9 +38,6 @@ extern void send_guest_global_virq(struct domain *d, int virq);
  */
 extern void send_guest_pirq(struct domain *d, int pirq);
 
-#define evtchn_pending(d, p)                    \
-    (test_bit((p), &(d)->shared_info->evtchn_pending[0]))
-
 /* Send a notification from a local event-channel port. */
 extern long evtchn_send(unsigned int lport);
 
index e0c71b367a7d286c7814ea0f965db15b66ce5677..bde06f79e6c263161983b36cf0cf221f0e50a7a1 100644 (file)
@@ -318,7 +318,7 @@ unsigned long hypercall_create_continuation(
 
 #define hypercall_preempt_check() (unlikely(    \
         softirq_pending(smp_processor_id()) |   \
-        event_pending(current)                  \
+        local_events_need_delivery()            \
     ))
 
 /* This domain_hash and domain_list are protected by the domlist_lock. */