ret = -EFAULT;
if ( copy_from_guest(&eoi, arg, 1) != 0 )
break;
+ ret = -EINVAL;
+ if ( eoi.irq < 0 || eoi.irq >= NR_IRQS )
+ break;
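+ /* With an EOI bitmap registered, an EOI also unmasks the pirq's event channel. */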
+ if ( current->domain->arch.pirq_eoi_map )
+ evtchn_unmask(current->domain->pirq_to_evtchn[eoi.irq]);
ret = pirq_guest_eoi(current->domain, eoi.irq);
break;
}
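+ /*
+ * Let the guest register a page (by gmfn) as its pirq EOI bitmap:
+ * one bit per IRQ, so the map must fit in a single page (see the
+ * BUILD_BUG_ON below). The page is pinned with get_page() and can
+ * be registered only once per domain (-EBUSY otherwise).
+ */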
+ case PHYSDEVOP_pirq_eoi_gmfn: {
+ struct physdev_pirq_eoi_gmfn info;
+ unsigned long mfn;
+
+ BUILD_BUG_ON(NR_IRQS > (PAGE_SIZE * 8));
+
+ ret = -EFAULT;
+ if ( copy_from_guest(&info, arg, 1) != 0 )
+ break;
+
+ ret = -EINVAL;
+ mfn = gmfn_to_mfn(current->domain, info.gmfn);
+ if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), current->domain) )
+ break;
+
+ if ( cmpxchg(&current->domain->arch.pirq_eoi_map_mfn, 0, mfn) != 0 )
+ {
+ put_page(mfn_to_page(mfn));
+ ret = -EBUSY;
+ break;
+ }
+
+ current->domain->arch.pirq_eoi_map = mfn_to_virt(mfn);
+ ret = 0;
+ break;
+ }
+
/* Legacy since 0x00030202. */
case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
ret = pirq_guest_unmask(current->domain);
struct domain *guest[IRQ_MAX_GUESTS];
} irq_guest_action_t;
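+/* Update the domain's pirq EOI bitmap, if the guest has registered one. */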
+static inline void set_pirq_eoi(struct domain *d, unsigned int irq)
+{
+ if ( d->arch.pirq_eoi_map )
+ set_bit(irq, d->arch.pirq_eoi_map);
+}
+
+static inline void clear_pirq_eoi(struct domain *d, unsigned int irq)
+{
+ if ( d->arch.pirq_eoi_map )
+ clear_bit(irq, d->arch.pirq_eoi_map);
+}
+
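+/*
+ * Complete a deferred EOI: clear each guest's pending-EOI bit for this
+ * IRQ, drop IRQ_INPROGRESS and IRQ_GUEST_EOI_PENDING, and re-enable
+ * the line.
+ */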
+static void _irq_guest_eoi(irq_desc_t *desc)
+{
+ irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
+ unsigned int i, vector = desc - irq_desc;
+
+ if ( !(desc->status & IRQ_GUEST_EOI_PENDING) )
+ return;
+
+ for ( i = 0; i < action->nr_guests; ++i )
+ clear_pirq_eoi(action->guest[i], vector);
+
+ desc->status &= ~(IRQ_INPROGRESS|IRQ_GUEST_EOI_PENDING);
+ desc->handler->enable(vector);
+}
+
static struct timer irq_guest_eoi_timer[NR_IRQS];
static void irq_guest_eoi_timer_fn(void *data)
{
irq_desc_t *desc = data;
- unsigned vector = desc - irq_desc;
unsigned long flags;
spin_lock_irqsave(&desc->lock, flags);
- desc->status &= ~IRQ_INPROGRESS;
- desc->handler->enable(vector);
+ _irq_guest_eoi(desc);
spin_unlock_irqrestore(&desc->lock, flags);
}
if ( already_pending == action->nr_guests )
{
- desc->handler->disable(irq);
stop_timer(&irq_guest_eoi_timer[irq]);
+ desc->handler->disable(irq);
+ desc->status |= IRQ_GUEST_EOI_PENDING;
+ for ( i = 0; i < already_pending; ++i )
+ {
+ d = action->guest[i];
+ set_pirq_eoi(d, irq);
+ /*
+ * Could check here whether the guest unmasked the event by now
+ * (or perhaps just re-issue the send_guest_pirq()), and if it
+ * can now accept the event,
+ * - clear all the pirq_eoi bits we already set,
+ * - re-enable the vector, and
+ * - skip the timer setup below.
+ */
+ }
init_timer(&irq_guest_eoi_timer[irq],
irq_guest_eoi_timer_fn, desc, smp_processor_id());
set_timer(&irq_guest_eoi_timer[irq], NOW() + MILLISECS(1));
int pirq_guest_eoi(struct domain *d, int irq)
{
irq_desc_t *desc;
+ irq_guest_action_t *action;
if ( (irq < 0) || (irq >= NR_IRQS) )
return -EINVAL;
desc = &irq_desc[irq];
spin_lock_irq(&desc->lock);
- if ( test_and_clear_bit(irq, &d->pirq_mask) &&
- (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
+ action = (irq_guest_action_t *)desc->action;
+
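+ /*
+ * An ACKTYPE_NONE IRQ may have been left disabled with its EOI
+ * deferred; complete that EOI now rather than waiting for the timer.
+ */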
+ if ( action->ack_type == ACKTYPE_NONE )
+ {
+ ASSERT(!test_bit(irq, d->pirq_mask));
+ stop_timer(&irq_guest_eoi_timer[irq]);
+ _irq_guest_eoi(desc);
+ }
+
+ if ( test_and_clear_bit(irq, &d->pirq_mask) && (--action->in_flight == 0) )
{
- ASSERT(((irq_guest_action_t*)desc->action)->ack_type == ACKTYPE_UNMASK);
+ ASSERT(action->ack_type == ACKTYPE_UNMASK);
desc->handler->end(irq);
}
spin_unlock_irq(&desc->lock);
action->guest[action->nr_guests++] = v->domain;
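+ /* Seed the EOI bit: ACKTYPE_NONE IRQs never require a guest EOI. */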
+ if ( action->ack_type != ACKTYPE_NONE )
+ set_pirq_eoi(v->domain, irq);
+ else
+ clear_pirq_eoi(v->domain, irq);
+
out:
spin_unlock_irqrestore(&desc->lock, flags);
return rc;