x86/mm: Make iommu passthrough and mem paging/sharing mutually exclusive
author    Andres Lagar-Cavilla <andres@lagarcavilla.org>
          Thu, 29 Mar 2012 11:01:33 +0000 (12:01 +0100)
committer Andres Lagar-Cavilla <andres@lagarcavilla.org>
          Thu, 29 Mar 2012 11:01:33 +0000 (12:01 +0100)
Regardless of table sharing or processor vendor, these features cannot coexist:
IOMMUs do not expect gfn->mfn mappings to change, while sharing and paging
depend on trapping all accesses.

Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
Acked-by: Tim Deegan <tim@xen.org>
Committed-by: Tim Deegan <tim@xen.org>
xen/arch/x86/mm/mem_event.c
xen/arch/x86/mm/mem_sharing.c
xen/drivers/passthrough/iommu.c

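The three hunks below add a symmetric guard: enabling paging or sharing fails
with -EXDEV once the domain already has a passthrough device, and assigning a
device fails with -EXDEV once paging or sharing is already active. A minimal,
self-contained sketch of that pattern follows; the struct, field names and
helpers are invented for illustration and are not the real Xen interfaces.

/* Standalone sketch of the mutual-exclusion guard (illustrative only). */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct dom {
    bool need_iommu;       /* a passthrough device is assigned */
    bool paging_enabled;   /* mem paging ring has been set up */
    bool sharing_enabled;  /* mem sharing has been switched on */
};

/* Enabling paging or sharing must fail once the IOMMU is in use... */
static int enable_feature(struct dom *d, bool *feature)
{
    if (d->need_iommu)
        return -EXDEV;
    *feature = true;
    return 0;
}

/* ...and assigning a device must fail once paging or sharing is active. */
static int assign_device(struct dom *d)
{
    if (!d->need_iommu &&
        (d->paging_enabled || d->sharing_enabled))
        return -EXDEV;
    d->need_iommu = true;
    return 0;
}

int main(void)
{
    struct dom d = { false, false, false };

    printf("assign device:       %d\n", assign_device(&d));            /* 0 */
    printf("then enable sharing: %d\n",
           enable_feature(&d, &d.sharing_enabled));                     /* -EXDEV */
    return 0;
}

In the actual patch these checks live in mem_event_domctl(),
mem_sharing_domctl() and assign_device() respectively, as shown in the hunks
that follow.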
index 4e01c7ff21f2e931dcd489f15e4bc32b33578ff8..f15293fe4de7911a3ac5532e48ed61f32b9cda63 100644 (file)
@@ -567,7 +567,11 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
             if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
                 break;
 
+            /* No paging if iommu is used */
             rc = -EXDEV;
+            if ( unlikely(need_iommu(d)) )
+                break;
+
             /* Disallow paging in a PoD guest */
             if ( p2m->pod.entry_count )
                 break;
index 1456de55d722663e7c457f779437fb2e8c301aff..c838fd848f92c92b4d7840ec0fdf6bc64ba241bf 100644 (file)
@@ -1205,8 +1205,11 @@ int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec)
     {
         case XEN_DOMCTL_MEM_SHARING_CONTROL:
         {
-            d->arch.hvm_domain.mem_sharing_enabled = mec->u.enable;
             rc = 0;
+            if ( unlikely(need_iommu(d) && mec->u.enable) )
+                rc = -EXDEV;
+            else
+                d->arch.hvm_domain.mem_sharing_enabled = mec->u.enable;
         }
         break;
 
index 82488d3e9f132181fb6e1fdbd09b508d79e9b042..4aefd91a053cf432a22e5ff234aee47b1f3e5e94 100644 (file)
@@ -205,6 +205,13 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
+    /* Prevent device assignment if mem paging or mem sharing has been
+     * enabled for this domain. */
+    if ( unlikely(!need_iommu(d) &&
+            (d->arch.hvm_domain.mem_sharing_enabled ||
+             d->mem_event->paging.ring_page)) )
+        return -EXDEV;
+
     spin_lock(&pcidevs_lock);
     if ( (rc = hd->platform_ops->assign_device(d, seg, bus, devfn)) )
         goto done;