spin_lock(&irq_map->dom->event_lock);
+ if ( irq_map->flags & HVM_IRQ_DPCI_IDENTITY_GSI )
+ {
+ ASSERT(is_hardware_domain(irq_map->dom));
+ /*
+ * Identity mapped: no need to iterate over the guest GSI list to find
+ * other pirqs sharing the same guest GSI.
+ *
+ * In the identity mapped case the EOI can also be done now; this way
+ * the iteration over the list of domain pirqs is avoided.
+ */
+ hvm_gsi_deassert(irq_map->dom, dpci_pirq(irq_map)->pirq);
+ irq_map->flags |= HVM_IRQ_DPCI_EOI_LATCH;
+ pt_irq_guest_eoi(irq_map->dom, irq_map, NULL);
+ spin_unlock(&irq_map->dom->event_lock);
+ return;
+ }
+
dpci = domain_get_irq_dpci(irq_map->dom);
if ( unlikely(!dpci) )
{
spin_lock(&d->event_lock);
hvm_irq_dpci = domain_get_irq_dpci(d);
- if ( hvm_irq_dpci == NULL )
+ if ( !hvm_irq_dpci && !is_hardware_domain(d) )
{
unsigned int i;
+ /*
+ * NB: the hardware domain doesn't use a hvm_irq_dpci struct because
+ * it's only allowed to identity map GSIs, and so the data contained in
+ * that struct (used to map guest GSIs into machine GSIs and perform
+ * interrupt routing) is completely useless to it.
+ */
hvm_irq_dpci = xzalloc(struct hvm_irq_dpci);
if ( hvm_irq_dpci == NULL )
{
case PT_IRQ_TYPE_PCI:
case PT_IRQ_TYPE_MSI_TRANSLATE:
{
- unsigned int bus = pt_irq_bind->u.pci.bus;
- unsigned int device = pt_irq_bind->u.pci.device;
- unsigned int intx = pt_irq_bind->u.pci.intx;
- unsigned int guest_gsi = hvm_pci_intx_gsi(device, intx);
- unsigned int link = hvm_pci_intx_link(device, intx);
- struct dev_intx_gsi_link *digl = xmalloc(struct dev_intx_gsi_link);
- struct hvm_girq_dpci_mapping *girq =
- xmalloc(struct hvm_girq_dpci_mapping);
+ struct dev_intx_gsi_link *digl = NULL;
+ struct hvm_girq_dpci_mapping *girq = NULL;
+ unsigned int guest_gsi;
- if ( !digl || !girq )
+ /*
+ * Mapping GSIs for the hardware domain differs from doing it for an
+ * unprivileged guest: the hardware domain is only allowed to identity
+ * map GSIs, and as such all the data in the u.pci union is discarded.
+ */
+ if ( hvm_irq_dpci )
{
- spin_unlock(&d->event_lock);
- xfree(girq);
- xfree(digl);
- return -ENOMEM;
- }
+ unsigned int link;
+
+ digl = xmalloc(struct dev_intx_gsi_link);
+ girq = xmalloc(struct hvm_girq_dpci_mapping);
+
+ if ( !digl || !girq )
+ {
+ spin_unlock(&d->event_lock);
+ xfree(girq);
+ xfree(digl);
+ return -ENOMEM;
+ }
+
+ girq->bus = digl->bus = pt_irq_bind->u.pci.bus;
+ girq->device = digl->device = pt_irq_bind->u.pci.device;
+ girq->intx = digl->intx = pt_irq_bind->u.pci.intx;
+ list_add_tail(&digl->list, &pirq_dpci->digl_list);
- hvm_irq_dpci->link_cnt[link]++;
+ guest_gsi = hvm_pci_intx_gsi(digl->device, digl->intx);
+ link = hvm_pci_intx_link(digl->device, digl->intx);
- digl->bus = bus;
- digl->device = device;
- digl->intx = intx;
- list_add_tail(&digl->list, &pirq_dpci->digl_list);
+ hvm_irq_dpci->link_cnt[link]++;
- girq->bus = bus;
- girq->device = device;
- girq->intx = intx;
- girq->machine_gsi = pirq;
- list_add_tail(&girq->list, &hvm_irq_dpci->girq[guest_gsi]);
+ girq->machine_gsi = pirq;
+ list_add_tail(&girq->list, &hvm_irq_dpci->girq[guest_gsi]);
+ }
+ else
+ {
+ ASSERT(is_hardware_domain(d));
+
+ /* MSI_TRANSLATE is not supported for the hardware domain. */
+ if ( pt_irq_bind->irq_type != PT_IRQ_TYPE_PCI ||
+ pirq >= hvm_domain_irq(d)->nr_gsis )
+ {
+ spin_unlock(&d->event_lock);
+ return -EINVAL;
+ }
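+ /* Identity mapped: the guest GSI is simply the machine GSI (pirq). */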
+ guest_gsi = pirq;
+ }
/* Bind the same mirq once in the same domain */
if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED |
HVM_IRQ_DPCI_MACH_PCI |
HVM_IRQ_DPCI_GUEST_PCI;
- share = BIND_PIRQ__WILL_SHARE;
+ if ( !is_hardware_domain(d) )
+ share = BIND_PIRQ__WILL_SHARE;
+ else
+ {
+ int mask = vioapic_get_mask(d, guest_gsi);
+ int trigger_mode = vioapic_get_trigger_mode(d, guest_gsi);
+
+ if ( mask < 0 || trigger_mode < 0 )
+ {
+ ASSERT_UNREACHABLE();
+ spin_unlock(&d->event_lock);
+ return -EINVAL;
+ }
+ pirq_dpci->flags |= HVM_IRQ_DPCI_IDENTITY_GSI;
+ /*
+ * Check whether the corresponding vIO APIC pin is configured as level
+ * or edge triggered; level triggered interrupts will be marked as
+ * shareable.
+ */
+ ASSERT(!mask);
+ share = trigger_mode;
+ }
}
/* Init timer before binding */
* IRQ_GUEST is not set. As such we can reset 'dom' directly.
*/
pirq_dpci->dom = NULL;
- list_del(&girq->list);
- list_del(&digl->list);
- hvm_irq_dpci->link_cnt[link]--;
+ if ( hvm_irq_dpci )
+ {
+ unsigned int link;
+
+ ASSERT(girq && digl);
+ list_del(&girq->list);
+ list_del(&digl->list);
+ link = hvm_pci_intx_link(digl->device, digl->intx);
+ hvm_irq_dpci->link_cnt[link]--;
+ }
pirq_dpci->flags = 0;
pirq_cleanup_check(info, d);
spin_unlock(&d->event_lock);
spin_unlock(&d->event_lock);
if ( iommu_verbose )
- printk(XENLOG_G_INFO
- "d%d: bind: m_gsi=%u g_gsi=%u dev=%02x.%02x.%u intx=%u\n",
- d->domain_id, pirq, guest_gsi, bus,
- PCI_SLOT(device), PCI_FUNC(device), intx);
+ {
+ char buf[24] = "";
+
+ if ( digl )
+ snprintf(buf, ARRAY_SIZE(buf), " dev=%02x.%02x.%u intx=%u",
+ digl->bus, PCI_SLOT(digl->device),
+ PCI_FUNC(digl->device), digl->intx);
+
+ printk(XENLOG_G_INFO "d%d: bind: m_gsi=%u g_gsi=%u%s\n",
+ d->domain_id, pirq, guest_gsi, buf);
+ }
break;
}
hvm_irq_dpci = domain_get_irq_dpci(d);
- if ( hvm_irq_dpci == NULL )
+ if ( !hvm_irq_dpci && !is_hardware_domain(d) )
{
spin_unlock(&d->event_lock);
return -EINVAL;
pirq = pirq_info(d, machine_gsi);
pirq_dpci = pirq_dpci(pirq);
- if ( pt_irq_bind->irq_type != PT_IRQ_TYPE_MSI )
+ if ( hvm_irq_dpci && pt_irq_bind->irq_type != PT_IRQ_TYPE_MSI )
{
unsigned int bus = pt_irq_bind->u.pci.bus;
unsigned int device = pt_irq_bind->u.pci.device;
if ( what && iommu_verbose )
{
unsigned int device = pt_irq_bind->u.pci.device;
+ char buf[24] = "";
+
+ if ( hvm_irq_dpci )
+ snprintf(buf, ARRAY_SIZE(buf), " dev=%02x.%02x.%u intx=%u",
+ pt_irq_bind->u.pci.bus, PCI_SLOT(device),
+ PCI_FUNC(device), pt_irq_bind->u.pci.intx);
- printk(XENLOG_G_INFO
- "d%d %s unmap: m_irq=%u dev=%02x:%02x.%u intx=%u\n",
- d->domain_id, what, machine_gsi, pt_irq_bind->u.pci.bus,
- PCI_SLOT(device), PCI_FUNC(device), pt_irq_bind->u.pci.intx);
+ printk(XENLOG_G_INFO "d%d %s unmap: m_irq=%u%s\n",
+ d->domain_id, what, machine_gsi, buf);
}
return 0;
ASSERT(is_hvm_domain(d));
- if ( !iommu_enabled || !dpci || !pirq_dpci ||
- !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
+ if ( !iommu_enabled || (!is_hardware_domain(d) && !dpci) ||
+ !pirq_dpci || !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
return 0;
pirq_dpci->masked = 1;
static void hvm_dirq_assist(struct domain *d, struct hvm_pirq_dpci *pirq_dpci)
{
- if ( unlikely(!hvm_domain_irq(d)->dpci) )
+ if ( unlikely(!hvm_domain_irq(d)->dpci) && !is_hardware_domain(d) )
{
ASSERT_UNREACHABLE();
return;
list_for_each_entry ( digl, &pirq_dpci->digl_list, list )
{
+ ASSERT(!(pirq_dpci->flags & HVM_IRQ_DPCI_IDENTITY_GSI));
hvm_pci_intx_assert(d, digl->device, digl->intx);
pirq_dpci->pending++;
}
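+ /* Identity mapped GSIs have no digl entries; assert the GSI directly. */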
+ if ( pirq_dpci->flags & HVM_IRQ_DPCI_IDENTITY_GSI )
+ {
+ hvm_gsi_assert(d, pirq->pirq);
+ pirq_dpci->pending++;
+ }
+
if ( pirq_dpci->flags & HVM_IRQ_DPCI_TRANSLATE )
{
/* for translated MSI to INTx interrupt, eoi as early as possible */
spin_unlock(&d->event_lock);
}
-static void __hvm_dpci_eoi(struct domain *d,
- const struct hvm_girq_dpci_mapping *girq,
- const union vioapic_redir_entry *ent)
+static void hvm_pirq_eoi(struct pirq *pirq,
+ const union vioapic_redir_entry *ent)
{
- struct pirq *pirq = pirq_info(d, girq->machine_gsi);
- struct hvm_pirq_dpci *pirq_dpci;
-
- if ( !hvm_domain_use_pirq(d, pirq) )
- hvm_pci_intx_deassert(d, girq->device, girq->intx);
-
- pirq_dpci = pirq_dpci(pirq);
+ struct hvm_pirq_dpci *pirq_dpci = pirq_dpci(pirq);
/*
* No need to get vector lock for timer
pirq_guest_eoi(pirq);
}
+static void __hvm_dpci_eoi(struct domain *d,
+ const struct hvm_girq_dpci_mapping *girq,
+ const union vioapic_redir_entry *ent)
+{
+ struct pirq *pirq = pirq_info(d, girq->machine_gsi);
+
+ if ( !hvm_domain_use_pirq(d, pirq) )
+ hvm_pci_intx_deassert(d, girq->device, girq->intx);
+
+ hvm_pirq_eoi(pirq, ent);
+}
+
+static void hvm_gsi_eoi(struct domain *d, unsigned int gsi,
+ const union vioapic_redir_entry *ent)
+{
+ struct pirq *pirq = pirq_info(d, gsi);
+
+ /* Check if GSI is actually mapped. */
+ if ( !pirq_dpci(pirq) )
+ return;
+
+ hvm_gsi_deassert(d, gsi);
+ hvm_pirq_eoi(pirq, ent);
+}
+
void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
const union vioapic_redir_entry *ent)
{
if ( !iommu_enabled )
return;
+ if ( is_hardware_domain(d) )
+ {
+ spin_lock(&d->event_lock);
+ hvm_gsi_eoi(d, guest_gsi, ent);
+ goto unlock;
+ }
+
if ( guest_gsi < NR_ISAIRQS )
{
hvm_dpci_isairq_eoi(d, guest_gsi);