/* check IOTLB invalidation granularity */
if ( DMA_TLB_IAIG(val) == 0 )
printk(KERN_ERR VTDPREFIX "IOMMU: flush IOTLB failed\n");
+
+#ifdef VTD_DEBUG
if ( DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type) )
printk(KERN_ERR VTDPREFIX "IOMMU: tlb flush request %x, actual %x\n",
(u32)DMA_TLB_IIRG(type), (u32)DMA_TLB_IAIG(val));
+#endif
    /* flush context entry will implicitly flush write buffer */
return 0;
}
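For reference: the IAIG/IIRG check above compares the invalidation
granularity the driver requested against the one the hardware actually
performed; IAIG == 0 means no flush happened at all. A minimal sketch of
that convention, assuming the VT-d IOTLB invalidate register layout (the
EX_* names and bit positions are illustrative, not taken from this patch):

/* Illustrative only: requested (IIRG) vs. actual (IAIG) invalidation
 * granularity. Bit positions are assumptions based on the VT-d spec's
 * IOTLB invalidate register (IIRG in bits 61:60, IAIG in bits 58:57). */
#define EX_DMA_TLB_IIRG(type) (((u64)(type) >> 60) & 3)
#define EX_DMA_TLB_IAIG(val)  (((u64)(val) >> 57) & 3)

static int ex_check_iotlb_flush(u64 type, u64 val)
{
    if ( EX_DMA_TLB_IAIG(val) == 0 )
        return -1;  /* hardware performed no flush at all */
    if ( EX_DMA_TLB_IAIG(val) != EX_DMA_TLB_IIRG(type) )
        return 1;   /* flushed, but at a coarser granularity than asked */
    return 0;       /* flushed exactly as requested */
}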
drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
/* get last level pte */
- pg_maddr = dma_addr_level_page_maddr(domain, addr, 1);
+ pg_maddr = dma_addr_level_page_maddr(domain, addr, 2);
if ( pg_maddr == 0 )
return;
page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
pte = page + address_level_offset(addr, 1);
- if ( pte )
+
+ if ( !dma_pte_present(*pte) )
{
- dma_clear_pte(*pte);
- iommu_flush_cache_entry(drhd->iommu, pte);
+ unmap_vtd_domain_page(page);
+ return;
+ }
- for_each_drhd_unit ( drhd )
- {
- iommu = drhd->iommu;
+ dma_clear_pte(*pte);
+ iommu_flush_cache_entry(drhd->iommu, pte);
- if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
- continue;
+ for_each_drhd_unit ( drhd )
+ {
+ iommu = drhd->iommu;
- if ( cap_caching_mode(iommu->cap) )
- iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
- addr, 1, 0);
- else if (cap_rwbf(iommu->cap))
- iommu_flush_write_buffer(iommu);
- }
+ if ( test_bit(iommu->index, &hd->iommu_bitmap) )
+            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
+                                  addr, 1, 0);
}
+
unmap_vtd_domain_page(page);
}
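For reference, a sketch of the PTE helpers the hunk above relies on. The
layout assumption (read/write permission in bits 1:0 of the entry, an
entry being present iff either bit is set) follows the VT-d page-table
entry format; the ex_* names are illustrative, not from this patch:

/* Illustrative VT-d leaf PTE helpers, assuming permissions in bits 1:0. */
struct ex_dma_pte { u64 val; };
#define ex_dma_pte_present(p) (((p).val & 3) != 0)        /* R or W set */
#define ex_dma_clear_pte(p)   do { (p).val = 0; } while ( 0 )

Testing dma_pte_present() before doing any work is what lets the function
above return early, without touching the cache or the IOTLB, when the
mapping was never established.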
unmap_vtd_domain_page(context_entries);
+ /* it's a non-present to present mapping */
if ( iommu_flush_context_device(iommu, domain_iommu_domid(domain),
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT, 1) )
iommu_flush_write_buffer(iommu);
else
- iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0);
+ iommu_flush_iotlb_dsi(iommu, 0, 0);
set_bit(iommu->index, &hd->iommu_bitmap);
spin_unlock_irqrestore(&iommu->lock, flags);
if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
continue;
- if ( pte_present || cap_caching_mode(iommu->cap) )
+ if ( pte_present )
iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
(paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
- else if ( cap_rwbf(iommu->cap) )
+ else if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
+ (paddr_t)gfn << PAGE_SHIFT_4K, 1, 1) )
iommu_flush_write_buffer(iommu);
}
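The rewritten branch depends on a return convention for
iommu_flush_iotlb_psi(): the last argument flags a non-present-to-present
update, and a nonzero return means no IOTLB invalidation was issued, so
the caller falls back to a write-buffer flush (with
iommu_flush_write_buffer() presumably checking cap_rwbf() itself). A
sketch of that convention, inferred from the call sites here rather than
copied from the flush code:

/* Sketch of the assumed convention inside the flush routine: a
 * non-present entry can only have been cached by hardware with caching
 * mode (CM) set, so otherwise the IOTLB flush is skipped and 1 is
 * returned, letting the caller decide on a write-buffer flush. */
static int ex_flush_iotlb_psi(struct iommu *iommu, u16 did, u64 addr,
                              unsigned int pages, int non_present_entry_flush)
{
    if ( non_present_entry_flush && !cap_caching_mode(iommu->cap) )
        return 1;   /* nothing cached; caller may flush the write buffer */
    /* ... otherwise issue the page-selective IOTLB invalidation ... */
    return 0;
}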
int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
{
- struct hvm_iommu *hd = domain_hvm_iommu(d);
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
- struct dma_pte *page = NULL, *pte = NULL;
- u64 pg_maddr;
drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
iommu = drhd->iommu;
return 0;
#endif
- pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K);
- if ( pg_maddr == 0 )
- return -ENOMEM;
- page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
- pte = page + (gfn & LEVEL_MASK);
- dma_clear_pte(*pte);
- iommu_flush_cache_entry(drhd->iommu, pte);
- unmap_vtd_domain_page(page);
-
- for_each_drhd_unit ( drhd )
- {
- iommu = drhd->iommu;
-
- if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
- continue;
-
- if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
- (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0) )
- iommu_flush_write_buffer(iommu);
- }
+ dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
return 0;
}
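Worth noting: delegating to dma_pte_clear_one() keeps the teardown
ordering the removed open-coded path had. An illustrative summary (not
part of the patch):

/* Ordering enforced by dma_pte_clear_one():
 *   1. dma_clear_pte(*pte)                  - clear the leaf entry
 *   2. iommu_flush_cache_entry(iommu, pte)  - make the clear visible
 *   3. iommu_flush_iotlb_psi(...)           - drop cached translations
 * Issuing step 3 before step 2 could let the IOMMU re-fetch and
 * re-cache the stale entry from memory. */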
if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
continue;
- if ( cap_caching_mode(iommu->cap) )
- iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
- iova, index, 0);
- else if ( cap_rwbf(iommu->cap) )
+ if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
+ iova, index, 1) )
iommu_flush_write_buffer(iommu);
}
return 0;
}
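Both call sites now leave the caching-mode decision to the flush routine
instead of testing capability bits themselves. For reference, sketches of
the two capability tests involved, with bit positions assumed from the
VT-d Capability Register (CM at bit 7, RWBF at bit 4), illustrative
rather than taken from this patch:

/* Illustrative capability helpers; bit positions follow the VT-d
 * Capability Register layout (assumed, not copied from this patch). */
#define ex_cap_caching_mode(c) (((c) >> 7) & 1)  /* CM bit */
#define ex_cap_rwbf(c)         (((c) >> 4) & 1)  /* RWBF bit */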
-void iommu_flush(struct domain *d, unsigned long gfn, u64 *p2m_entry)
-{
- struct hvm_iommu *hd = domain_hvm_iommu(d);
- struct acpi_drhd_unit *drhd;
- struct iommu *iommu = NULL;
- struct dma_pte *pte = (struct dma_pte *) p2m_entry;
-
- for_each_drhd_unit ( drhd )
- {
- iommu = drhd->iommu;
-
- if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
- continue;
-
- if ( cap_caching_mode(iommu->cap) )
- iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
- (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0);
- else if ( cap_rwbf(iommu->cap) )
- iommu_flush_write_buffer(iommu);
- }
-
- iommu_flush_cache_entry(iommu, pte);
-}
-
static int iommu_prepare_rmrr_dev(
struct domain *d,
struct acpi_rmrr_unit *rmrr,