v_target = d->arch.vgic.handler->get_target_vcpu(v, irq);
p = irq_to_pending(v_target, irq);
set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
- /* We need to force the first injection of evtchn_irq because
- * evtchn_upcall_pending is already set by common code on vcpu
- * creation. */
- if ( irq == v_target->domain->arch.evtchn_irq &&
- vcpu_info(current, evtchn_upcall_pending) &&
- list_empty(&p->inflight) )
- vgic_vcpu_inject_irq(v_target, irq);
- else {
- unsigned long flags;
- spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
- if ( !list_empty(&p->inflight) && !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
- gic_raise_guest_irq(v_target, irq, p->priority);
- spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
- }
+ spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
+ if ( !list_empty(&p->inflight) && !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
+ gic_raise_guest_irq(v_target, irq, p->priority);
+ spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
if ( p->desc != NULL )
{
irq_set_affinity(p->desc, cpumask_of(v_target->processor));
vgic_vcpu_inject_irq(v, irq);
}
+void arch_evtchn_inject(struct vcpu *v)
+{
+ vgic_vcpu_inject_irq(v, v->domain->arch.evtchn_irq);
+}
+
/*
* Local variables:
* mode: C
return !hvm_interrupt_blocked(v, intack);
}

+void arch_evtchn_inject(struct vcpu *v)
+{
+ if ( has_hvm_container_vcpu(v) )
+ hvm_assert_evtchn_irq(v);
+}
+
static void irq_dump(struct domain *d)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
/* Notify remote end of a Xen-attached event channel.*/
void notify_via_xen_event_channel(struct domain *ld, int lport);

+/* Inject an event channel notification into the guest */
+void arch_evtchn_inject(struct vcpu *v);
+
/*
* Internal event channel object storage.
*
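The hunks above only define and declare arch_evtchn_inject(); the call site that makes the removed vgic_enable_irqs() special case unnecessary is not shown in this section. A minimal sketch of such a caller, assuming it sits at the end of the common map_vcpu_info() path where evtchn_upcall_pending is set (the wrapper name and exact placement are assumptions for illustration, not lines from this patch):

/*
 * Hypothetical helper, for illustration only: the tail of a
 * map_vcpu_info()-style path, after the new vcpu_info page is in place.
 */
static void inject_pending_evtchns(struct vcpu *v)
{
    struct domain *d = v->domain;
    unsigned int i;

    /* Mark everything pending; the guest copes with a spurious upcall. */
    vcpu_info(v, evtchn_upcall_pending) = 1;
    for ( i = 0; i < BITS_PER_EVTCHN_WORD(d); i++ )
        set_bit(i, &vcpu_info(v, evtchn_pending_sel));

    /*
     * Deliver the notification through the new arch hook:
     * vgic_vcpu_inject_irq() on ARM, hvm_assert_evtchn_irq() on x86 HVM.
     */
    arch_evtchn_inject(v);
}

With the upcall injected at the point where evtchn_upcall_pending is actually set, the ARM vgic no longer needs to force the first injection of evtchn_irq when the interrupt is enabled, which is why the first hunk can delete that special case.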