IOMMU: clear "don't flush" override on error paths
author     Jan Beulich <jbeulich@suse.com>
Tue, 10 Dec 2013 15:10:37 +0000 (16:10 +0100)
committer  Jan Beulich <jbeulich@suse.com>
Tue, 10 Dec 2013 15:10:37 +0000 (16:10 +0100)
Both xenmem_add_to_physmap() and iommu_populate_page_table() have an
error path that fails to clear the per-CPU "don't flush" override, thus
suppressing further flushes on the respective pCPU.
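
For illustration only (this is not Xen code and not part of the patch):
the hazard is the familiar one of pairing a per-CPU override with an
early return. A minimal plain-C sketch, where dont_flush, map_one() and
flush_all() are made-up stand-ins for this_cpu(iommu_dont_flush_iotlb),
the per-page map_page() call and the batched IOTLB flush:

#include <stdbool.h>
#include <stdio.h>

static bool dont_flush;                 /* stand-in for the per-CPU override */

static void flush_all(void) { puts("flush"); }
static int map_one(int i) { return i == 3 ? -1 : 0; }  /* fails part-way through */

static int populate(void)
{
    int i, rc = 0;

    dont_flush = true;                  /* suppress per-mapping flushes */
    for ( i = 0; i < 8; i++ )
    {
        rc = map_one(i);
        if ( rc )
            break;    /* a bare "return rc;" here would leave dont_flush set */
    }
    dont_flush = false;                 /* undone on success and error alike */

    if ( !rc )
        flush_all();                    /* one batched flush once the override is lifted */

    return rc;
}

int main(void)
{
    printf("rc=%d dont_flush=%d\n", populate(), dont_flush);
    return 0;
}

With "break" instead of "return", the clean-up after the loop runs on
the error path as well, which is what both hunks below arrange.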

In iommu_populate_page_table(), also slightly re-arrange the code to
avoid the false impression that the flag in question is guarded by the
domain's page_alloc_lock.
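
Likewise purely illustrative (hypothetical names, with a pthread mutex
standing in for the spinlock): setting and clearing the override outside
the locked region makes it plain that the flag is per-CPU state rather
than something page_alloc_lock protects:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t page_alloc_lock = PTHREAD_MUTEX_INITIALIZER;
static bool dont_flush;                 /* per-CPU override stand-in */

static void map_all_pages(void)
{
    /* walk the domain's page list and map each page here */
}

static void populate(void)
{
    dont_flush = true;                  /* set before taking the lock ... */
    pthread_mutex_lock(&page_alloc_lock);

    map_all_pages();

    pthread_mutex_unlock(&page_alloc_lock);
    dont_flush = false;                 /* ... cleared after dropping it */
}

int main(void)
{
    populate();
    return dont_flush;                  /* 0: the override was undone */
}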

This is CVE-2013-6400 / XSA-80.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
xen/arch/x86/mm.c
xen/drivers/passthrough/iommu.c

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 6c26026e05ca78be36ad8a34ff1aaba762457005..dd42bde212f3244b756c23f388979a53d709700c 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4648,7 +4648,7 @@ static int xenmem_add_to_physmap(struct domain *d,
         {
             rc = xenmem_add_to_physmap_once(d, xatp);
             if ( rc < 0 )
-                return rc;
+                break;
 
             xatp->idx++;
             xatp->gpfn++;
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index bdc72616b85be3c2cd604cce9cb421c20bad40d0..d3dd6838c4e1122f24f4cea6c8e1fff253c64961 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -322,11 +322,11 @@ static int iommu_populate_page_table(struct domain *d)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);
     struct page_info *page;
-    int rc;
+    int rc = 0;
 
+    this_cpu(iommu_dont_flush_iotlb) = 1;
     spin_lock(&d->page_alloc_lock);
 
-    this_cpu(iommu_dont_flush_iotlb) = 1;
     page_list_for_each ( page, &d->page_list )
     {
         if ( is_hvm_domain(d) ||
@@ -336,18 +336,20 @@ static int iommu_populate_page_table(struct domain *d)
             rc = hd->platform_ops->map_page(
                 d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page),
                 IOMMUF_readable|IOMMUF_writable);
-            if (rc)
-            {
-                spin_unlock(&d->page_alloc_lock);
-                hd->platform_ops->teardown(d);
-                return rc;
-            }
+            if ( rc )
+                break;
         }
     }
-    this_cpu(iommu_dont_flush_iotlb) = 0;
-    iommu_iotlb_flush_all(d);
+
     spin_unlock(&d->page_alloc_lock);
-    return 0;
+    this_cpu(iommu_dont_flush_iotlb) = 0;
+
+    if ( !rc )
+        iommu_iotlb_flush_all(d);
+    else
+        hd->platform_ops->teardown(d);
+
+    return rc;
 }