with local_event_delivery_* accessors.
Notes:
1. There is still some (read-only, debug) use in keyhandler.c.
2. The new accessors still go through current->vcpu_info.
Both of the above may need to be compiled only for architectures
that use event channels.
Signed-off-by: Keir Fraser <keir@xensource.com>
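
The conversion follows the same before/after shape at every touched
call site; a minimal sketch (illustration only, using the field and
accessor names from this patch, always acting on the current vcpu):

    /* Before: common code poked the shared-info fields directly. */
    v->vcpu_info->evtchn_upcall_mask = 0;
    if ( !!v->vcpu_info->evtchn_upcall_pending &
         !v->vcpu_info->evtchn_upcall_mask )
        { /* ... handle the pending event ... */ }

    /* After: the same logic goes through the per-arch accessors. */
    local_event_delivery_enable();
    if ( local_events_need_delivery() )
        { /* ... handle the pending event ... */ }
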
evtchn_notify(v);
}
else if ( unlikely(test_bit(_VCPUF_blocked, &v->vcpu_flags) &&
- v->vcpu_info->evtchn_upcall_mask) )
+ !local_event_delivery_is_enabled()) )
{
/*
* Blocked and masked will usually mean that the VCPU executed
{
struct vcpu *v = current;
- v->vcpu_info->evtchn_upcall_mask = 0;
+ local_event_delivery_enable();
set_bit(_VCPUF_blocked, &v->vcpu_flags);
/* Check for events /after/ blocking: avoids wakeup waiting race. */
- if ( event_pending(v) )
+ if ( local_events_need_delivery() )
{
clear_bit(_VCPUF_blocked, &v->vcpu_flags);
}
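
(The ordering above is what the "wakeup waiting race" comment refers
to; a hypothetical reversed ordering, illustration only and not the
patched code, shows the window that would be lost:)

    /* Hypothetical broken ordering -- NOT what do_block() does.
     * An event arriving between the check and set_bit() is neither seen
     * by the check nor treated as a wakeup for a blocked vcpu, so the
     * vcpu could go to sleep with work already pending.  Setting
     * _VCPUF_blocked before checking, as above, closes that window. */
    if ( local_events_need_delivery() )
        return 0;
    set_bit(_VCPUF_blocked, &v->vcpu_flags);
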
if ( !guest_handle_okay(sched_poll->ports, sched_poll->nr_ports) )
return -EFAULT;
- /* Ensure that upcalls are disabled: tested by evtchn_set_pending(). */
- if ( !v->vcpu_info->evtchn_upcall_mask )
+    /* Ensure that event delivery is disabled: tested by evtchn_set_pending(). */
+ if ( local_event_delivery_is_enabled() )
return -EINVAL;
set_bit(_VCPUF_blocked, &v->vcpu_flags);
goto out;
rc = 0;
- if ( evtchn_pending(v->domain, port) )
+ if ( test_bit(port, v->domain->shared_info->evtchn_pending) )
goto out;
}
(!!(v)->vcpu_info->evtchn_upcall_pending & \
!(v)->vcpu_info->evtchn_upcall_mask)
+static inline int local_events_need_delivery(void)
+{
+ return event_pending(current);
+}
+
+static inline int local_event_delivery_is_enabled(void)
+{
+ return !current->vcpu_info->evtchn_upcall_mask;
+}
+
+static inline void local_event_delivery_disable(void)
+{
+ current->vcpu_info->evtchn_upcall_mask = 1;
+}
+
+static inline void local_event_delivery_enable(void)
+{
+    current->vcpu_info->evtchn_upcall_mask = 0;
+}
+
static inline int arch_virq_is_global(int virq)
{
int rc;
return &vcpu->arch.vhpt;
}
-#define check_work_pending(v) \
- (event_pending((v)) || ((v)->arch.irq_new_pending))
#endif
smp_send_event_check_cpu(v->processor);
}
-/* Note: Bitwise operations result in fast code with no branches. */
-#define event_pending(v) \
- (!!(v)->vcpu_info->evtchn_upcall_pending & \
- !(v)->vcpu_info->evtchn_upcall_mask)
+static inline int local_events_need_delivery(void)
+{
+ struct vcpu *v = current;
+ /* Note: Bitwise operations result in fast code with no branches. */
+ return (!!v->vcpu_info->evtchn_upcall_pending &
+ !v->vcpu_info->evtchn_upcall_mask);
+}
+
+static inline int local_event_delivery_is_enabled(void)
+{
+ return !current->vcpu_info->evtchn_upcall_mask;
+}
+
+static inline void local_event_delivery_disable(void)
+{
+ current->vcpu_info->evtchn_upcall_mask = 1;
+}
+
+static inline void local_event_delivery_enable(void)
+{
+ current->vcpu_info->evtchn_upcall_mask = 0;
+}
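
Per the note in the commit message, an architecture that does not use
event channels would want these accessors to compile away; a
hypothetical stub set (not part of this patch) might be:

    /* Hypothetical no-op accessors for an architecture without event
     * channels; events are never pending and delivery is never enabled. */
    static inline int local_events_need_delivery(void)      { return 0; }
    static inline int local_event_delivery_is_enabled(void) { return 0; }
    static inline void local_event_delivery_disable(void)   { }
    static inline void local_event_delivery_enable(void)    { }
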
/* No arch specific virq definition now. Default to global. */
static inline int arch_virq_is_global(int virq)
*/
extern void send_guest_pirq(struct domain *d, int pirq);
-#define evtchn_pending(d, p) \
- (test_bit((p), &(d)->shared_info->evtchn_pending[0]))
-
/* Send a notification from a local event-channel port. */
extern long evtchn_send(unsigned int lport);
#define hypercall_preempt_check() (unlikely( \
softirq_pending(smp_processor_id()) | \
- event_pending(current) \
+ local_events_need_delivery() \
))
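
For context, hypercall_preempt_check() is polled from long-running
hypercalls so that pending softirqs or guest events bound the time
spent in the hypervisor; a hypothetical caller (illustration only, not
part of this patch) might look like:

    /* Hypothetical long-running operation: stop between items whenever
     * a softirq or a guest event is pending, and report how far it got
     * so the caller can resume from that point later. */
    static unsigned long process_batch(unsigned long start, unsigned long nr_ops)
    {
        unsigned long i;

        for ( i = start; i < nr_ops; i++ )
        {
            /* ... one bounded unit of work per iteration ... */
            if ( hypercall_preempt_check() )
                break;
        }

        return i;
    }
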
/* This domain_hash and domain_list are protected by the domlist_lock. */