vtd: Fix bug #1306: Dom0 hangs when destroying guest with MSI NIC assigned

author     Keir Fraser <keir.fraser@citrix.com>
           Thu, 31 Jul 2008 08:51:06 +0000 (09:51 +0100)
committer  Keir Fraser <keir.fraser@citrix.com>
           Thu, 31 Jul 2008 08:51:06 +0000 (09:51 +0100)
Signed-off-by: Shan Haitao <haitao.shan@intel.com>
xen/arch/x86/msi.c
xen/drivers/passthrough/iommu.c
xen/drivers/passthrough/pci.c

diff --git a/xen/arch/x86/msi.c b/xen/arch/x86/msi.c
index d6c4d0b1e61963b4efcae72ed69369e9b0282312..91e725049a3c2c382fc54f9614f70c717e66d072 100644
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -761,14 +761,13 @@ retry:
     {
         desc = &irq_desc[entry->vector];
 
-       local_irq_save(flags);
-       if ( !spin_trylock(&desc->lock) )
-       {
-           local_irq_restore(flags);
-           goto retry;
-       }
-
-        spin_lock_irqsave(&desc->lock, flags);
+        local_irq_save(flags);
+        if ( !spin_trylock(&desc->lock) )
+        {
+            local_irq_restore(flags);
+            goto retry;
+        }
+
         if ( desc->handler == &pci_msi_type )
         {
             /* MSI is not shared, so should be released already */
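
The hunk above fixes the hang itself: the old code acquired desc->lock with
spin_trylock() and then immediately tried to take the same lock again with
spin_lock_irqsave().  Xen spinlocks are not recursive, so the second
acquisition spins forever with interrupts disabled, which is the Dom0 hang of
bug #1306.  The fix keeps only the trylock-and-retry path.  Below is a minimal
user-space sketch of that idiom; it is an illustration only, not Xen code, and
the desc_lock/free_vector names are made up for the example.

/* Illustration of the trylock-and-retry locking idiom kept by the fix. */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t desc_lock;        /* stands in for desc->lock */

static void free_vector(void)               /* hypothetical cleanup path */
{
retry:
    /* Take the lock without blocking; on contention, back off and retry. */
    if ( pthread_spin_trylock(&desc_lock) != 0 )
        goto retry;

    /* ... release the vector while holding the lock ... */

    /* Locking desc_lock again here, as the removed line did, would deadlock. */
    pthread_spin_unlock(&desc_lock);
}

int main(void)
{
    pthread_spin_init(&desc_lock, PTHREAD_PROCESS_PRIVATE);
    free_vector();
    pthread_spin_destroy(&desc_lock);
    printf("vector released without deadlocking\n");
    return 0;
}
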
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 2e2afaeb1302e3f71a82bd8b2fffe53a7d69b7fb..0a3fc48da9dae3cf280833f5ae3c5320452e51a3 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -126,14 +126,12 @@ static int iommu_populate_page_table(struct domain *d)
     return 0;
 }
 
+
 void iommu_domain_destroy(struct domain *d)
 {
-    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
-    uint32_t i;
     struct hvm_iommu *hd  = domain_hvm_iommu(d);
-    struct list_head *ioport_list, *digl_list, *tmp;
+    struct list_head *ioport_list, *tmp;
     struct g2m_ioport *ioport;
-    struct dev_intx_gsi_link *digl;
 
     if ( !iommu_enabled || !hd->platform_ops )
         return;
@@ -148,30 +146,6 @@ void iommu_domain_destroy(struct domain *d)
         return;
     }
 
-    if ( hvm_irq_dpci != NULL )
-    {
-        for ( i = 0; i < NR_IRQS; i++ )
-        {
-            if ( !(hvm_irq_dpci->mirq[i].flags & HVM_IRQ_DPCI_VALID) )
-                continue;
-
-            pirq_guest_unbind(d, i);
-            kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
-
-            list_for_each_safe ( digl_list, tmp,
-                                 &hvm_irq_dpci->mirq[i].digl_list )
-            {
-                digl = list_entry(digl_list,
-                                  struct dev_intx_gsi_link, list);
-                list_del(&digl->list);
-                xfree(digl);
-            }
-        }
-
-        d->arch.hvm_domain.irq.dpci = NULL;
-        xfree(hvm_irq_dpci);
-    }
-
     if ( hd )
     {
         list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index b38e04e908e0ff278591f3ece3bb070886fce02d..df7161d53966f177a68b907db2c87eb69dd33435 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -152,11 +152,50 @@ int pci_remove_device(u8 bus, u8 devfn)
     return ret;
 }
 
+static void pci_clean_dpci_irqs(struct domain *d)
+{
+    struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
+    uint32_t i;
+    struct list_head *digl_list, *tmp;
+    struct dev_intx_gsi_link *digl;
+
+    if ( !iommu_enabled )
+        return;
+
+    if ( !is_hvm_domain(d) && !need_iommu(d) )
+        return;
+
+    if ( hvm_irq_dpci != NULL )
+    {
+        for ( i = 0; i < NR_IRQS; i++ )
+        {
+            if ( !(hvm_irq_dpci->mirq[i].flags & HVM_IRQ_DPCI_VALID) )
+                continue;
+
+            pirq_guest_unbind(d, i);
+            kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
+
+            list_for_each_safe ( digl_list, tmp,
+                                 &hvm_irq_dpci->mirq[i].digl_list )
+            {
+                digl = list_entry(digl_list,
+                                  struct dev_intx_gsi_link, list);
+                list_del(&digl->list);
+                xfree(digl);
+            }
+        }
+
+        d->arch.hvm_domain.irq.dpci = NULL;
+        xfree(hvm_irq_dpci);
+    }
+}
+
 void pci_release_devices(struct domain *d)
 {
     struct pci_dev *pdev;
     u8 bus, devfn;
 
+    pci_clean_dpci_irqs(d);
     while ( (pdev = pci_lock_domain_pdev(d, -1, -1)) )
     {
         pci_cleanup_msi(pdev);
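
The dpci teardown deleted from iommu_domain_destroy() above is re-added here
verbatim as pci_clean_dpci_irqs(), which pci_release_devices() now runs before
freeing each device's MSI state, so guest IRQ bindings and their timers are
torn down as part of releasing the domain's PCI devices.  The inner loop uses
list_for_each_safe because it frees list nodes while walking the list.  The
following self-contained sketch shows that safe-deletion pattern; it is an
illustration only, using a plain singly-linked list and made-up struct
link/free_all names rather than Xen's list_for_each_safe macro.

#include <stdlib.h>
#include <stdio.h>

struct link {
    int gsi;                  /* stand-in for the dev_intx_gsi_link payload */
    struct link *next;
};

/*
 * Free every node.  The successor is saved before the current node is
 * freed, which is what the tmp cursor in list_for_each_safe is for.
 */
static void free_all(struct link **head)
{
    struct link *cur, *tmp;

    for ( cur = *head; cur != NULL; cur = tmp )
    {
        tmp = cur->next;      /* remember next before freeing cur */
        free(cur);
    }
    *head = NULL;
}

int main(void)
{
    struct link *head = NULL;

    for ( int i = 0; i < 3; i++ )
    {
        struct link *n = malloc(sizeof(*n));
        if ( n == NULL )
            return 1;
        n->gsi = i;
        n->next = head;
        head = n;
    }

    free_all(&head);
    printf("list emptied without touching freed nodes\n");
    return 0;
}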