!pci_ats_enabled(iommu->seg, bus, pdev->devfn) )
{
if ( devfn == pdev->devfn )
- enable_ats_device(iommu->seg, bus, devfn);
+ enable_ats_device(iommu->seg, bus, devfn, iommu);
amd_iommu_flush_iotlb(devfn, pdev, INV_IOMMU_ALL_PAGES_ADDRESS, 0);
}
u8 bus;
u8 devfn;
u16 ats_queue_depth; /* ATS device invalidation queue depth */
+ const void *iommu; /* No common IOMMU struct, so use a void pointer */
};
#define ATS_REG_CAP 4 /* ATS Capability Register offset in the ATS extended capability */
extern struct list_head ats_devices;
extern bool_t ats_enabled;
-int enable_ats_device(int seg, int bus, int devfn);
+int enable_ats_device(int seg, int bus, int devfn, const void *iommu);
void disable_ats_device(int seg, int bus, int devfn);
struct pci_ats_dev *get_ats_device(int seg, int bus, int devfn);
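Aside (not part of the patch): a minimal sketch of why the new parameter and field are a plain const void *. As the hunks above and below show, the AMD side passes its own iommu pointer while VT-d passes drhd->iommu; the two drivers share no common IOMMU structure, and the ATS code never dereferences the pointer, it only compares it for identity in dev_invalidate_iotlb(). The struct tags, example_callers() and the seg/bus/devfn values below are made up here for illustration.

    /* Illustration only -- not from the patch.  Both drivers hand over
     * their own IOMMU object; the ATS code stores it opaquely and later
     * uses it purely as an identity tag (pdev->iommu != iommu).
     */
    int enable_ats_device(int seg, int bus, int devfn, const void *iommu);

    struct amd_iommu;                 /* stands in for the AMD driver's type  */
    struct iommu;                     /* stands in for the VT-d driver's type */

    static void example_callers(struct amd_iommu *amd, struct iommu *vtd)
    {
        /* Any object pointer converts implicitly to const void *, so
         * each driver can pass its own IOMMU object unchanged. */
        enable_ats_device(0, 1, 0x08, amd);   /* AMD-Vi call site */
        enable_ats_device(0, 2, 0x10, vtd);   /* VT-d call site   */
    }

Keeping the field opaque avoids making the shared ATS code depend on either driver's headers; the trade-off is that the compiler cannot type-check which IOMMU object a caller passes.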
ret = domain_context_mapping_one(domain, drhd->iommu, bus, devfn,
pdev);
if ( !ret && devfn == pdev->devfn && ats_device(pdev, drhd) > 0 )
- enable_ats_device(seg, bus, devfn);
+ enable_ats_device(seg, bus, devfn, drhd->iommu);
break;
if ( ret <= 0 )
return ret;
- ret = enable_ats_device(pdev->seg, pdev->bus, pdev->devfn);
+ ret = enable_ats_device(pdev->seg, pdev->bus, pdev->devfn, drhd->iommu);
return ret >= 0 ? 0 : ret;
}
{
sid = (pdev->bus << 8) | pdev->devfn;
+ /* Only invalidate devices that belong to this IOMMU */
+ if ( pdev->iommu != iommu )
+ continue;
+
switch ( type ) {
case DMA_TLB_DSI_FLUSH:
if ( !device_in_domain(iommu, pdev, did) )
bool_t __read_mostly ats_enabled = 1;
boolean_param("ats", ats_enabled);
-int enable_ats_device(int seg, int bus, int devfn)
+int enable_ats_device(int seg, int bus, int devfn, const void *iommu)
{
struct pci_ats_dev *pdev = NULL;
u32 value;
pdev->seg = seg;
pdev->bus = bus;
pdev->devfn = devfn;
+ pdev->iommu = iommu;
value = pci_conf_read16(seg, bus, PCI_SLOT(devfn),
PCI_FUNC(devfn), pos + ATS_REG_CAP);
pdev->ats_queue_depth = value & ATS_QUEUE_DEPTH_MASK ?: