AMD IOMMU: Invalidate all pages on domain destruction
authorKeir Fraser <keir.fraser@citrix.com>
Wed, 3 Dec 2008 15:56:33 +0000 (15:56 +0000)
committerKeir Fraser <keir.fraser@citrix.com>
Wed, 3 Dec 2008 15:56:33 +0000 (15:56 +0000)
This patch invalidates all IOMMU-cached pages associated with a domain's
ID when that domain is destroyed, so no stale translations remain cached.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
xen/drivers/passthrough/amd/iommu_map.c
xen/drivers/passthrough/amd/pci_amd_iommu.c
xen/include/asm-x86/hvm/svm/amd-iommu-proto.h

index a41fe608908cb3c0a997ff58c25a3fba519f7807..3f4808d31d08004755f4e0645a54e1363a3ad387 100644 (file)
@@ -580,3 +580,47 @@ out:
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
     return 0;
 }
+
+/*
+ * Invalidate every IOMMU TLB entry belonging to domain d, on all
+ * IOMMUs in the system.  Called on domain destruction so that no
+ * cached translations for the dying domain's ID remain.
+ *
+ * NOTE(review): the address 0x7FFFFFFFFFFFF000 together with the S
+ * (size) flag set appears to be the architecturally defined
+ * "invalidate all pages" encoding for INVALIDATE_IOMMU_PAGES --
+ * confirm against the AMD IOMMU architecture specification.
+ */
+void invalidate_all_iommu_pages(struct domain *d)
+{
+    u32 cmd[4], entry;
+    unsigned long flags;
+    struct amd_iommu *iommu;
+    int domain_id = d->domain_id;
+    /* Split the magic address into low and high 32-bit halves; only
+     * bits 12-31 of the low half feed the command's ADDR_LOW field. */
+    u64 addr_lo = 0x7FFFFFFFFFFFF000ULL & DMA_32BIT_MASK;
+    u64 addr_hi = 0x7FFFFFFFFFFFF000ULL >> 32;
+
+    /* Command dword 1: domain ID to invalidate plus the opcode. */
+    set_field_in_reg_u32(domain_id, 0,
+                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
+                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry,
+                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
+                         &entry);
+    cmd[1] = entry;
+
+    /* Command dword 2: S flag, PDE flag, and the page-shifted low
+     * address bits. */
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
+                         IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK,
+                         IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK,
+                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry);
+    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
+                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK,
+                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry);
+    cmd[2] = entry;
+
+    /* Command dword 3: high 32 bits of the address. */
+    set_field_in_reg_u32((u32)addr_hi, 0,
+                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK,
+                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry);
+    cmd[3] = entry;
+
+    /* Command dword 0 is unused by this command. */
+    cmd[0] = 0;
+
+    /* Issue the command to every IOMMU, holding each IOMMU's lock
+     * around command-buffer access.  flush_command_buffer()
+     * presumably makes the command visible to / consumed by the
+     * hardware -- NOTE(review): confirm its completion semantics. */
+    for_each_amd_iommu ( iommu )
+    {
+        spin_lock_irqsave(&iommu->lock, flags);
+        send_iommu_command(iommu, cmd);
+        flush_command_buffer(iommu);
+        spin_unlock_irqrestore(&iommu->lock, flags);
+    }
+}
index 8e982455cb0a24069a0927da5071aad5b4836ce6..310911e15ea36b36d38636b4c5610253eb96e213 100644 (file)
@@ -389,6 +389,7 @@ static void deallocate_iommu_page_tables(struct domain *d)
 static void amd_iommu_domain_destroy(struct domain *d)
 {
     deallocate_iommu_page_tables(d);
+    /* Flush translations cached under this domain's ID so the IOMMU
+     * holds no stale entries referring to the just-freed page tables. */
+    invalidate_all_iommu_pages(d);
 }
 
 static int amd_iommu_return_device(
index e9e47990c0fefb528844d7fdc3f34509de12c234..929c5fd3f5683941cee3a830a452c9ea7a88b07b 100644 (file)
@@ -63,6 +63,7 @@ void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
 int amd_iommu_reserve_domain_unity_map(struct domain *domain,
         unsigned long phys_addr, unsigned long size, int iw, int ir);
 int amd_iommu_sync_p2m(struct domain *d);
+void invalidate_all_iommu_pages(struct domain *d);
 
 /* device table functions */
 void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u64 intremap_ptr,