unsigned long flags;
struct amd_iommu *iommu;
unsigned int req_id, queueid, maxpend;
- struct pci_ats_dev *ats_pdev;
if ( !ats_enabled )
return;
- ats_pdev = get_ats_device(pdev->seg, pdev->bus, pdev->devfn);
- if ( ats_pdev == NULL )
+ /* ATS state is tracked in struct pci_dev now; nothing to do if it's off. */
+ if ( !pci_ats_enabled(pdev->seg, pdev->bus, pdev->devfn) )
return;
- if ( !pci_ats_enabled(ats_pdev->seg, ats_pdev->bus, ats_pdev->devfn) )
- return;
-
- iommu = find_iommu_for_device(ats_pdev->seg,
- PCI_BDF2(ats_pdev->bus, ats_pdev->devfn));
+ iommu = find_iommu_for_device(pdev->seg, PCI_BDF2(pdev->bus, pdev->devfn));
if ( !iommu )
{
AMD_IOMMU_DEBUG("%s: Can't find iommu for %04x:%02x:%02x.%u\n",
- __func__, ats_pdev->seg, ats_pdev->bus,
- PCI_SLOT(ats_pdev->devfn), PCI_FUNC(ats_pdev->devfn));
+ __func__, pdev->seg, pdev->bus,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
return;
}
if ( !iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
return;
- req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(ats_pdev->bus, devfn));
+ req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(pdev->bus, devfn));
queueid = req_id;
- maxpend = ats_pdev->ats_queue_depth & 0xff;
+ /* Queue depth was cached in pdev->ats by enable_ats_device(). */
+ maxpend = pdev->ats.queue_depth & 0xff;
/* send INVALIDATE_IOTLB_PAGES command */
spin_lock_irqsave(&iommu->lock, flags);
}
spin_lock_init(&iommu->lock);
+ /* Devices with ATS enabled behind this IOMMU (see enable_ats_device()). */
+ INIT_LIST_HEAD(&iommu->ats_devices);
iommu->seg = ivhd_block->pci_segment_group;
iommu->bdf = ivhd_block->header.device_id;
!pci_ats_enabled(iommu->seg, bus, pdev->devfn) )
{
if ( devfn == pdev->devfn )
- enable_ats_device(iommu->seg, bus, devfn, iommu);
+ /* Also adds pdev to the IOMMU's ats_devices list. */
+ enable_ats_device(pdev, &iommu->ats_devices);
amd_iommu_flush_iotlb(devfn, pdev, INV_IOMMU_ALL_PAGES_ADDRESS, 0);
}
if ( devfn == pdev->devfn &&
pci_ats_device(iommu->seg, bus, devfn) &&
pci_ats_enabled(iommu->seg, bus, devfn) )
- disable_ats_device(iommu->seg, bus, devfn);
+ /* Also removes pdev from its IOMMU's ats_devices list. */
+ disable_ats_device(pdev);
}
static int reassign_device(struct domain *source, struct domain *target,
#include <xen/pci_regs.h>
-struct pci_ats_dev {
- struct list_head list;
- u16 seg;
- u8 bus;
- u8 devfn;
- u16 ats_queue_depth; /* ATS device invalidation queue depth */
- const void *iommu; /* No common IOMMU struct so use void pointer */
-};
-
#define ATS_REG_CAP 4
#define ATS_REG_CTL 6
#define ATS_QUEUE_DEPTH_MASK 0x1f
#define ATS_ENABLE (1<<15)
-extern struct list_head ats_devices;
extern bool_t ats_enabled;
-int enable_ats_device(int seg, int bus, int devfn, const void *iommu);
-void disable_ats_device(int seg, int bus, int devfn);
-struct pci_ats_dev *get_ats_device(int seg, int bus, int devfn);
+/*
+ * ATS bookkeeping (list membership, capability position, queue depth) now
+ * lives in struct pci_dev's "ats" sub-structure; callers pass the owning
+ * IOMMU's ats_devices list head instead of an opaque IOMMU pointer.
+ */
+int enable_ats_device(struct pci_dev *pdev, struct list_head *ats_list);
+void disable_ats_device(struct pci_dev *pdev);
static inline int pci_ats_enabled(int seg, int bus, int devfn)
{
return -ENOMEM;
iommu->msi.irq = -1; /* No irq assigned yet. */
+ /* Per-IOMMU list of ATS-enabled devices (filled by enable_ats_device()). */
+ INIT_LIST_HEAD(&iommu->ats_devices);
iommu->intel = alloc_intel_iommu();
if ( iommu->intel == NULL )
return rc;
}
-static int domain_context_mapping(
- struct domain *domain, u8 devfn, const struct pci_dev *pdev)
+static int domain_context_mapping(struct domain *domain, u8 devfn,
+ struct pci_dev *pdev)
+/* pdev loses const: enable_ats_device() updates pdev->ats bookkeeping. */
{
struct acpi_drhd_unit *drhd;
int ret = 0;
ret = domain_context_mapping_one(domain, drhd->iommu, bus, devfn,
pdev);
if ( !ret && devfn == pdev->devfn && ats_device(pdev, drhd) > 0 )
- enable_ats_device(seg, bus, devfn, drhd->iommu);
+ enable_ats_device(pdev, &drhd->iommu->ats_devices);
break;
return rc;
}
-static int domain_context_unmap(
- struct domain *domain, u8 devfn, const struct pci_dev *pdev)
+static int domain_context_unmap(struct domain *domain, u8 devfn,
+ struct pci_dev *pdev)
+/* pdev loses const: disable_ats_device() updates pdev->ats list state. */
{
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
PCI_SLOT(devfn), PCI_FUNC(devfn));
ret = domain_context_unmap_one(domain, iommu, bus, devfn);
if ( !ret && devfn == pdev->devfn && ats_device(pdev, drhd) > 0 )
- disable_ats_device(seg, bus, devfn);
+ disable_ats_device(pdev);
break;
if ( ret <= 0 )
return ret;
- ret = enable_ats_device(pdev->seg, pdev->bus, pdev->devfn, drhd->iommu);
+ ret = enable_ats_device(pdev, &drhd->iommu->ats_devices);
return ret >= 0 ? 0 : ret;
}
u64 root_maddr; /* root entry machine address */
struct msi_desc msi;
struct intel_iommu *intel;
+ struct list_head ats_devices; /* devices with ATS enabled behind this IOMMU */
unsigned long *domid_bitmap; /* domain id bitmap */
u16 *domid_map; /* domain id mapping array */
};
return pos;
}
-static int device_in_domain(struct iommu *iommu, struct pci_ats_dev *pdev, u16 did)
+static int device_in_domain(const struct iommu *iommu,
+ const struct pci_dev *pdev, u16 did)
{
struct root_entry *root_entry = NULL;
struct context_entry *ctxt_entry = NULL;
int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
u64 addr, unsigned int size_order, u64 type)
{
- struct pci_ats_dev *pdev;
+ const struct pci_dev *pdev;
int ret = 0;
if ( !ecap_dev_iotlb(iommu->ecap) )
return ret;
- list_for_each_entry( pdev, &ats_devices, list )
+ list_for_each_entry( pdev, &iommu->ats_devices, ats.list )
+ /* Per-IOMMU list: every entry already belongs to this IOMMU, so the
+  * old "pdev->iommu != iommu" filter (removed below) is unnecessary. */
{
u16 sid = PCI_BDF2(pdev->bus, pdev->devfn);
bool_t sbit;
int rc = 0;
- /* Only invalidate devices that belong to this IOMMU */
- if ( pdev->iommu != iommu )
- continue;
-
switch ( type )
{
case DMA_TLB_DSI_FLUSH:
/* invalidate all translations: sbit=1,bit_63=0,bit[62:12]=1 */
sbit = 1;
addr = (~0UL << PAGE_SHIFT_4K) & 0x7FFFFFFFFFFFFFFF;
- rc = qinval_device_iotlb_sync(iommu, pdev->ats_queue_depth,
+ rc = qinval_device_iotlb_sync(iommu, pdev->ats.queue_depth,
sid, sbit, addr);
break;
case DMA_TLB_PSI_FLUSH:
addr |= (((u64)1 << (size_order - 1)) - 1) << PAGE_SHIFT_4K;
}
- rc = qinval_device_iotlb_sync(iommu, pdev->ats_queue_depth,
+ rc = qinval_device_iotlb_sync(iommu, pdev->ats.queue_depth,
sid, sbit, addr);
break;
default:
#include <xen/pci_regs.h>
#include "../ats.h"
-LIST_HEAD(ats_devices);
-
bool_t __read_mostly ats_enabled = 0;
boolean_param("ats", ats_enabled);
-int enable_ats_device(int seg, int bus, int devfn, const void *iommu)
+int enable_ats_device(struct pci_dev *pdev, struct list_head *ats_list)
{
- struct pci_ats_dev *pdev = NULL;
u32 value;
+ u16 seg = pdev->seg;
+ u8 bus = pdev->bus, devfn = pdev->devfn;
int pos;
pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);
PCI_FUNC(devfn), pos + ATS_REG_CTL);
if ( value & ATS_ENABLE )
{
- list_for_each_entry ( pdev, &ats_devices, list )
- {
- if ( pdev->seg == seg && pdev->bus == bus && pdev->devfn == devfn )
+ struct pci_dev *other;
+
+ /* Already on the list? Then signal "no (re-)setup needed" via pos = 0. */
+ list_for_each_entry ( other, ats_list, ats.list )
+ if ( other == pdev )
{
pos = 0;
break;
}
- }
}
- if ( pos )
- pdev = xmalloc(struct pci_ats_dev);
- if ( !pdev )
- return -ENOMEM;
if ( !(value & ATS_ENABLE) )
{
if ( pos )
{
- pdev->seg = seg;
- pdev->bus = bus;
- pdev->devfn = devfn;
- pdev->iommu = iommu;
+ /* Cache capability position and queue depth in pdev->ats. */
+ pdev->ats.cap_pos = pos;
value = pci_conf_read16(seg, bus, PCI_SLOT(devfn),
PCI_FUNC(devfn), pos + ATS_REG_CAP);
- pdev->ats_queue_depth = value & ATS_QUEUE_DEPTH_MASK ?:
+ pdev->ats.queue_depth = value & ATS_QUEUE_DEPTH_MASK ?:
ATS_QUEUE_DEPTH_MASK + 1;
- list_add(&pdev->list, &ats_devices);
+ list_add(&pdev->ats.list, ats_list);
}
if ( iommu_verbose )
return pos;
}
-void disable_ats_device(int seg, int bus, int devfn)
+void disable_ats_device(struct pci_dev *pdev)
{
- struct pci_ats_dev *pdev;
u32 value;
- int pos;
+ u16 seg = pdev->seg;
+ u8 bus = pdev->bus, devfn = pdev->devfn;
- pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);
- BUG_ON(!pos);
+ /* cap_pos was recorded by enable_ats_device(); it must be set here. */
+ BUG_ON(!pdev->ats.cap_pos);
- value = pci_conf_read16(seg, bus, PCI_SLOT(devfn),
- PCI_FUNC(devfn), pos + ATS_REG_CTL);
+ value = pci_conf_read16(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ pdev->ats.cap_pos + ATS_REG_CTL);
value &= ~ATS_ENABLE;
pci_conf_write16(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
- pos + ATS_REG_CTL, value);
+ pdev->ats.cap_pos + ATS_REG_CTL, value);
- list_for_each_entry ( pdev, &ats_devices, list )
- {
- if ( pdev->seg == seg && pdev->bus == bus && pdev->devfn == devfn )
- {
- list_del(&pdev->list);
- xfree(pdev);
- break;
- }
- }
+ /* Unhook from the owning IOMMU's ats_devices list; no free needed now. */
+ list_del(&pdev->ats.list);
if ( iommu_verbose )
dprintk(XENLOG_INFO, "%04x:%02x:%02x.%u: ATS is disabled\n",
seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
}
-
-struct pci_ats_dev *get_ats_device(int seg, int bus, int devfn)
-{
- struct pci_ats_dev *pdev;
-
- if ( !pci_ats_device(seg, bus, devfn) )
- return NULL;
-
- list_for_each_entry ( pdev, &ats_devices, list )
- {
- if ( pdev->seg == seg && pdev->bus == bus && pdev->devfn == devfn )
- return pdev;
- }
-
- return NULL;
-}
uint64_t exclusion_limit;
int enabled;
+
+ struct list_head ats_devices; /* devices with ATS enabled behind this IOMMU */
};
struct ivrs_mappings {
struct pci_dev_info info;
struct arch_pci_dev arch;
+ /* ATS (Address Translation Services) state; filled by enable_ats_device(). */
+ struct {
+ struct list_head list; /* entry in owning IOMMU's ats_devices list */
+ unsigned int cap_pos; /* PCI ATS extended capability offset */
+ unsigned int queue_depth; /* device's invalidation queue depth */
+ } ats;
struct {
s_time_t time;
unsigned int count;