if ( !(val & DMA_GSTS_WBFS) )
break;
if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
- panic("DMAR hardware is malfunctional,"
- " please disable IOMMU\n");
+ panic("%s: DMAR hardware is malfunctional,"
+ " please disable IOMMU\n", __func__);
cpu_relax();
}
spin_unlock_irqrestore(&iommu->register_lock, flag);
if ( !(val & DMA_CCMD_ICC) )
break;
if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
- panic("DMAR hardware is malfunctional, please disable IOMMU\n");
+ panic("%s: DMAR hardware is malfunctional,"
+ " please disable IOMMU\n", __func__);
cpu_relax();
}
spin_unlock_irqrestore(&iommu->register_lock, flag);
if ( !(val & DMA_TLB_IVT) )
break;
if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
- panic("DMAR hardware is malfunctional, please disable IOMMU\n");
+ panic("%s: DMAR hardware is malfunctional,"
+ " please disable IOMMU\n", __func__);
cpu_relax();
}
spin_unlock_irqrestore(&iommu->register_lock, flag);
/* check IOTLB invalidation granularity */
if ( DMA_TLB_IAIG(val) == 0 )
- printk(KERN_ERR VTDPREFIX "IOMMU: flush IOTLB failed\n");
+ dprintk(XENLOG_ERR VTDPREFIX, "IOMMU: flush IOTLB failed\n");
-#ifdef VTD_DEBUG
if ( DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type) )
- printk(KERN_ERR VTDPREFIX "IOMMU: tlb flush request %x, actual %x\n",
+ dprintk(XENLOG_INFO VTDPREFIX,
+ "IOMMU: tlb flush request %x, actual %x\n",
(u32)DMA_TLB_IIRG(type), (u32)DMA_TLB_IAIG(val));
-#endif
/* flush context entry will implicitly flush write buffer */
return 0;
}
unsigned int align;
struct iommu_flush *flush = iommu_get_flush(iommu);
- BUG_ON(addr & (~PAGE_MASK_4K));
- BUG_ON(pages == 0);
+ ASSERT(!(addr & (~PAGE_MASK_4K)));
+ ASSERT(pages > 0);
/* Fallback to domain selective flush if no PSI support */
if ( !cap_pgsel_inv(iommu->cap) )
{
iommu = drhd->iommu;
if ( test_bit(iommu->index, &hd->iommu_bitmap) )
- iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
- addr, 1, 0);
+ if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
+ addr, 1, 0))
+ iommu_flush_write_buffer(iommu);
}
unmap_vtd_domain_page(page);
if ( sts & DMA_GSTS_RTPS )
break;
if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
- panic("DMAR hardware is malfunctional, please disable IOMMU\n");
+ panic("%s: DMAR hardware is malfunctional,"
+ " please disable IOMMU\n", __func__);
cpu_relax();
}
if ( sts & DMA_GSTS_TES )
break;
if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
- panic("DMAR hardware is malfunctional, please disable IOMMU\n");
+ panic("%s: DMAR hardware is malfunctional,"
+ " please disable IOMMU\n", __func__);
cpu_relax();
}
if ( !(sts & DMA_GSTS_TES) )
break;
if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
- panic("DMAR hardware is malfunctional, please disable IOMMU\n");
+ panic("%s: DMAR hardware is malfunctional,"
+ " please disable IOMMU\n", __func__);
cpu_relax();
}
spin_unlock_irqrestore(&iommu->register_lock, flags);
int iommu_page_mapping(struct domain *domain, paddr_t iova,
paddr_t hpa, size_t size, int prot)
{
+ struct hvm_iommu *hd = domain_hvm_iommu(domain);
+ struct acpi_drhd_unit *drhd;
+ struct iommu *iommu;
u64 start_pfn, end_pfn;
struct dma_pte *page = NULL, *pte = NULL;
int index;
index++;
}
+ if ( index > 0 )
+ {
+ for_each_drhd_unit ( drhd )
+ {
+ iommu = drhd->iommu;
+ if ( test_bit(iommu->index, &hd->iommu_bitmap) )
+ if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
+ iova, index, 1))
+ iommu_flush_write_buffer(iommu);
+ }
+ }
+
return 0;
}