 }
 }
 
-static void iommu_free_next_pagetable(u64 pt_maddr, unsigned long index,
-                                      int level)
+static void iommu_free_pagetable(u64 pt_maddr, int level)
 {
-    unsigned long next_index;
-    struct dma_pte *pt_vaddr, *pde;
-    int next_level;
+    int i;
+    struct dma_pte *pt_vaddr, *pte;
+    int next_level = level - 1;
 
     if ( pt_maddr == 0 )
         return;
 
     pt_vaddr = (struct dma_pte *)map_vtd_domain_page(pt_maddr);
-    pde = &pt_vaddr[index];
-    if ( dma_pte_addr(*pde) == 0 )
-        goto out;
 
-    next_level = level - 1;
-    if ( next_level > 1 )
+    for ( i = 0; i < PTE_NUM; i++ )
     {
-        for ( next_index = 0; next_index < PTE_NUM; next_index++ )
-            iommu_free_next_pagetable(pde->val, next_index, next_level);
-    }
-
-    dma_clear_pte(*pde);
-    iommu_flush_cache_entry(pde);
-    free_pgtable_maddr(pde->val);
-
- out:
-    unmap_vtd_domain_page(pt_vaddr);
-}
-
-/* free all VT-d page tables when shut down or destroy domain. */
-static void iommu_free_pagetable(struct domain *domain)
-{
-    struct hvm_iommu *hd = domain_hvm_iommu(domain);
-    int i, total_level = agaw_to_level(hd->agaw);
+        pte = &pt_vaddr[i];
+        if ( !dma_pte_present(*pte) )
+            continue;
 
-    if ( hd->pgd_maddr == 0 )
-        return;
+        if ( next_level >= 1 )
+            iommu_free_pagetable(dma_pte_addr(*pte), next_level);
 
-    for ( i = 0; i < PTE_NUM; i++ )
-        iommu_free_next_pagetable(hd->pgd_maddr, i, total_level + 1);
+        dma_clear_pte(*pte);
+        iommu_flush_cache_entry(pte);
+    }
 
-    free_pgtable_maddr(hd->pgd_maddr);
-    hd->pgd_maddr = 0;
+    unmap_vtd_domain_page(pt_vaddr);
+    free_pgtable_maddr(pt_maddr);
 }
@@ ... @@ static int iommu_set_root_entry(struct iommu *iommu)
 void iommu_domain_teardown(struct domain *d)
 {
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
     if ( list_empty(&acpi_drhd_units) )
         return;
 
-    iommu_free_pagetable(d);
     return_devices_to_dom0(d);
+    iommu_free_pagetable(hd->pgd_maddr, agaw_to_level(hd->agaw));
+    hd->pgd_maddr = 0;
     iommu_domid_release(d);
 }