static void ack_msi_vector(unsigned int vector)
{
- ack_APIC_irq();
+ if ( msi_maskable_irq(irq_desc[vector].msi_desc) )
+ ack_APIC_irq(); /* ACKTYPE_NONE */
}
static void end_msi_vector(unsigned int vector)
{
+ if ( !msi_maskable_irq(irq_desc[vector].msi_desc) )
+ ack_APIC_irq(); /* ACKTYPE_EOI */
}
static void shutdown_msi_vector(unsigned int vector)
spin_lock_irq(&desc->lock);
}
break;
+ case ACKTYPE_NONE:
+ stop_timer(&irq_guest_eoi_timer[vector]);
+ _irq_guest_eoi(desc);
+ break;
}
/*
ASSERT(spin_is_locked(&pcidevs_lock));
ASSERT(spin_is_locked(&d->event_lock));
- /* XXX Until pcidev and msi locking is fixed. */
- if ( type == MAP_PIRQ_TYPE_MSI )
- return -EINVAL;
-
if ( !IS_PRIV(current->domain) )
return -EPERM;
if ( (irq < 0) || (irq >= NR_IRQS) )
break;
irq_status_query.flags = 0;
- if ( pirq_acktype(v->domain, irq) != 0 )
- irq_status_query.flags |= XENIRQSTAT_needs_eoi;
+ /*
+ * Even edge-triggered or message-based IRQs can need masking from
+ * time to time. If the guest is not dynamically checking for this
+ * via the new pirq_eoi_map mechanism, it must conservatively always
+ * execute the EOI hypercall. In practice, this only really makes a
+ * difference for maskable MSI sources, and if those are supported
+ * then dom0 is probably modern anyway.
+ */
+ irq_status_query.flags |= XENIRQSTAT_needs_eoi;
if ( pirq_shared(v->domain, irq) )
irq_status_query.flags |= XENIRQSTAT_shared;
ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
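With pirq_acktype() removed, PHYSDEVOP_irq_status_query now always reports XENIRQSTAT_needs_eoi, so a guest that only consults this static flag will issue PHYSDEVOP_eoi for every pirq. A guest that instead registers a shared pirq_eoi_map (via PHYSDEVOP_pirq_eoi_gmfn, the mechanism the comment refers to) can test the per-pirq bit at unmask time and skip the hypercall when no EOI is pending. A rough guest-side sketch, with illustrative names and assuming the bitmap has already been mapped:

    /* Sketch of a PV guest's decision, not code from this patch. */
    static unsigned long *pirq_eoi_map;  /* shared bitmap, one bit per pirq */

    static void maybe_eoi_pirq(int pirq)
    {
        struct physdev_eoi eoi = { .irq = pirq };

        /* Older guests without the map conservatively always EOI. */
        if ( !pirq_eoi_map || test_bit(pirq, pirq_eoi_map) )
            HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
    }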
extern atomic_t irq_err_count;
extern atomic_t irq_mis_count;
-int pirq_acktype(struct domain *d, int irq);
int pirq_shared(struct domain *d , int irq);
int map_domain_pirq(struct domain *d, int pirq, int vector, int type,