static void nmi_softirq(void)
{
/* Only used to defer wakeup of dom0,vcpu0 to a safe (non-NMI) context. */
- evtchn_notify(dom0->vcpu[0]);
+ vcpu_kick(dom0->vcpu[0]);
}
static void nmi_dom0_report(unsigned int reason_idx)
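The hunk above is the safe-context half of a two-stage wakeup. An NMI can interrupt almost anything, so the NMI path itself must not take locks or send IPIs; per the comment, the actual kick of dom0's vcpu0 is deferred to nmi_softirq(), with nmi_dom0_report() (whose signature closes the hunk) presumably recording the reason and raising the softirq. A minimal userspace sketch of that defer-to-safe-context pattern; the names (fake_nmi_handler, fake_softirq, nmi_pending) are illustrative, not Xen's:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool nmi_pending;      /* models the raised-softirq state */

static void fake_nmi_handler(void)   /* "NMI context": only set a flag */
{
    atomic_store(&nmi_pending, true);
}

static void fake_softirq(void)       /* "safe context": do the real work */
{
    if (atomic_exchange(&nmi_pending, false))
        puts("kicking dom0/vcpu0");  /* stands in for vcpu_kick() */
}

int main(void)
{
    fake_nmi_handler();
    fake_nmi_handler();              /* back-to-back reports collapse */
    fake_softirq();                  /* prints once */
    fake_softirq();                  /* flag consumed: prints nothing */
    return 0;
}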
if ( !test_bit (port, s->evtchn_mask) &&
!test_and_set_bit(port / BITS_PER_LONG,
- &v->vcpu_info->evtchn_pending_sel) &&
- !test_and_set_bit(0, &v->vcpu_info->evtchn_upcall_pending) )
+ &v->vcpu_info->evtchn_pending_sel) )
{
- evtchn_notify(v);
+ vcpu_mark_events_pending(v);
}
/* Check if some VCPU might be polling for this event. */
if ( test_and_clear_bit(port, s->evtchn_mask) &&
test_bit (port, s->evtchn_pending) &&
!test_and_set_bit (port / BITS_PER_LONG,
- &v->vcpu_info->evtchn_pending_sel) &&
- !test_and_set_bit (0, &v->vcpu_info->evtchn_upcall_pending) )
+ &v->vcpu_info->evtchn_pending_sel) )
{
- evtchn_notify(v);
+ vcpu_mark_events_pending(v);
}
spin_unlock(&d->evtchn_lock);
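Both event-channel hunks manipulate the same two-level pending structure: a per-port bit in evtchn_pending, a per-word selector bit in evtchn_pending_sel, and a single evtchn_upcall_pending flag per vcpu. The refactoring moves the final test-and-set of that flag, plus the conditional notification, out of the callers and into vcpu_mark_events_pending() (defined in the header hunks below). The standalone sketch below models the producer side together with the guest-side scan that the selector word makes cheap; the names, the fixed port count, and the single-threaded setting are assumptions for illustration, not Xen's interface:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define NR_PORTS      256                     /* illustrative size */

static unsigned long pending[NR_PORTS / BITS_PER_LONG];
static unsigned long pending_sel;             /* one bit per pending[] word */

static void set_pending(unsigned int port)    /* producer side */
{
    pending[port / BITS_PER_LONG] |= 1UL << (port % BITS_PER_LONG);
    pending_sel                   |= 1UL << (port / BITS_PER_LONG);
    /* A real producer would now call vcpu_mark_events_pending(). */
}

static void scan_events(void)                 /* guest-side consumer */
{
    while (pending_sel != 0) {
        /* __builtin_ctzl (GCC/Clang) finds the lowest set bit. */
        unsigned int word = __builtin_ctzl(pending_sel);
        pending_sel &= ~(1UL << word);
        while (pending[word] != 0) {
            unsigned int bit = __builtin_ctzl(pending[word]);
            pending[word] &= ~(1UL << bit);
            printf("event on port %u\n",
                   (unsigned)(word * BITS_PER_LONG + bit));
        }
    }
}

int main(void)
{
    set_pending(3);
    set_pending(70);   /* lands in a different selector word */
    scan_events();
    return 0;
}

The point of the selector word is that the consumer touches only the pending words that actually changed instead of scanning the whole bitmap.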
#include <public/arch-ia64.h>
#include <asm/vcpu.h>
-static inline void evtchn_notify(struct vcpu *v)
+static inline void vcpu_kick(struct vcpu *v)
{
/*
* NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
vcpu_pend_interrupt(v, v->domain->shared_info->arch.evtchn_vector);
}
+static inline void vcpu_mark_events_pending(struct vcpu *v)
+{
+ if ( !test_and_set_bit(0, &v->vcpu_info->evtchn_upcall_pending) )
+ vcpu_kick(v);
+}
+
/* Note: Bitwise operations result in fast code with no branches. */
#define event_pending(v) \
(!!(v)->vcpu_info->evtchn_upcall_pending & \
 !(v)->vcpu_info->evtchn_upcall_mask)
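This header hunk (the #include of public/arch-ia64.h suggests the ia64 copy) is where the renamed primitives split: vcpu_kick() is the raw wakeup, while the new vcpu_mark_events_pending() owns the 0-to-1 transition of evtchn_upcall_pending and kicks only then, so a burst of events on an already-pending vcpu costs one kick rather than one per event. A compilable toy version of that kick-once contract, with hypothetical toy_* names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
    atomic_bool upcall_pending;      /* models evtchn_upcall_pending */
};

static void toy_kick(struct toy_vcpu *v)
{
    (void)v;
    puts("IPI/wakeup sent");         /* stands in for vcpu_kick() */
}

static void toy_mark_events_pending(struct toy_vcpu *v)
{
    /* atomic_exchange returns the old value: kick only on 0 -> 1. */
    if (!atomic_exchange(&v->upcall_pending, true))
        toy_kick(v);
}

int main(void)
{
    struct toy_vcpu v = { .upcall_pending = false };
    toy_mark_events_pending(&v);     /* first event: kicks */
    toy_mark_events_pending(&v);     /* already pending: silent */
    return 0;
}

Using the return value of the exchange, rather than a separate load followed by a store, is what keeps the check race-free when several producers notify the same vcpu concurrently.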
#ifndef __ASM_EVENT_H__
#define __ASM_EVENT_H__
-static inline void evtchn_notify(struct vcpu *v)
+static inline void vcpu_kick(struct vcpu *v)
{
/*
* NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
smp_send_event_check_cpu(v->processor);
}
+static inline void vcpu_mark_events_pending(struct vcpu *v)
+{
+ if ( !test_and_set_bit(0, &v->vcpu_info->evtchn_upcall_pending) )
+ vcpu_kick(v);
+}
+
static inline int local_events_need_delivery(void)
{
struct vcpu *v = current;
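The last hunk applies the same split to a second architecture's copy of the header (the smp_send_event_check_cpu() call suggests x86). local_events_need_delivery(), truncated here by the hunk boundary, presumably pairs the two per-vcpu fields the same way as the event_pending() macro above: an event is deliverable only when evtchn_upcall_pending is set and evtchn_upcall_mask is clear, and the "no branches" note refers to computing that with !!, ! and & instead of &&. A standalone illustration of the branch-free test; the struct and names are stand-ins:

#include <stdio.h>

struct toy_vcpu_info {
    unsigned char evtchn_upcall_pending;   /* events waiting */
    unsigned char evtchn_upcall_mask;      /* guest blocked delivery */
};

static int toy_events_need_delivery(const struct toy_vcpu_info *vi)
{
    /* '!!' and '!' normalise each byte to 0/1; '&' combines them
     * without the conditional jump that '&&' would introduce. */
    return !!vi->evtchn_upcall_pending & !vi->evtchn_upcall_mask;
}

int main(void)
{
    struct toy_vcpu_info vi = { .evtchn_upcall_pending = 1,
                                .evtchn_upcall_mask    = 1 };
    printf("%d\n", toy_events_need_delivery(&vi));  /* 0: masked */
    vi.evtchn_upcall_mask = 0;
    printf("%d\n", toy_events_need_delivery(&vi));  /* 1: deliverable */
    return 0;
}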