}
/* return value determines whether we need a write buffer flush */
-static int __must_check flush_context_reg(void *_iommu, u16 did, u16 source_id,
- u8 function_mask, u64 type,
- bool_t flush_non_present_entry)
+static int __must_check flush_context_reg(struct vtd_iommu *iommu, u16 did,
+ u16 source_id, u8 function_mask,
+ u64 type,
+ bool flush_non_present_entry)
{
- struct vtd_iommu *iommu = _iommu;
u64 val = 0;
unsigned long flags;
}
static int __must_check iommu_flush_context_global(struct vtd_iommu *iommu,
- bool_t flush_non_present_entry)
+ bool flush_non_present_entry)
{
- struct iommu_flush *flush = iommu_get_flush(iommu);
- return flush->context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
- flush_non_present_entry);
+ return iommu->flush.context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
+ flush_non_present_entry);
}
static int __must_check iommu_flush_context_device(struct vtd_iommu *iommu,
u16 did, u16 source_id,
u8 function_mask,
- bool_t flush_non_present_entry)
+ bool flush_non_present_entry)
{
- struct iommu_flush *flush = iommu_get_flush(iommu);
- return flush->context(iommu, did, source_id, function_mask,
- DMA_CCMD_DEVICE_INVL,
- flush_non_present_entry);
+ return iommu->flush.context(iommu, did, source_id, function_mask,
+ DMA_CCMD_DEVICE_INVL, flush_non_present_entry);
}
/* return value determines whether we need a write buffer flush */
-static int __must_check flush_iotlb_reg(void *_iommu, u16 did, u64 addr,
+static int __must_check flush_iotlb_reg(struct vtd_iommu *iommu, u16 did,
+ u64 addr,
unsigned int size_order, u64 type,
- bool_t flush_non_present_entry,
- bool_t flush_dev_iotlb)
+ bool flush_non_present_entry,
+ bool flush_dev_iotlb)
{
- struct vtd_iommu *iommu = _iommu;
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0;
unsigned long flags;
}
static int __must_check iommu_flush_iotlb_global(struct vtd_iommu *iommu,
- bool_t flush_non_present_entry,
- bool_t flush_dev_iotlb)
+ bool flush_non_present_entry,
+ bool flush_dev_iotlb)
{
- struct iommu_flush *flush = iommu_get_flush(iommu);
int status;
/* apply platform specific errata workarounds */
vtd_ops_preamble_quirk(iommu);
- status = flush->iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
- flush_non_present_entry, flush_dev_iotlb);
+ status = iommu->flush.iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
+ flush_non_present_entry, flush_dev_iotlb);
/* undo platform specific errata workarounds */
vtd_ops_postamble_quirk(iommu);
bool_t flush_non_present_entry,
bool_t flush_dev_iotlb)
{
- struct iommu_flush *flush = iommu_get_flush(iommu);
int status;
/* apply platform specific errata workarounds */
vtd_ops_preamble_quirk(iommu);
- status = flush->iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
- flush_non_present_entry, flush_dev_iotlb);
+ status = iommu->flush.iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
+ flush_non_present_entry, flush_dev_iotlb);
/* undo platform specific errata workarounds */
vtd_ops_postamble_quirk(iommu);
bool_t flush_non_present_entry,
bool_t flush_dev_iotlb)
{
- struct iommu_flush *flush = iommu_get_flush(iommu);
int status;
ASSERT(!(addr & (~PAGE_MASK_4K)));
/* Fallback to domain selective flush if no PSI support */
if ( !cap_pgsel_inv(iommu->cap) )
- return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry, flush_dev_iotlb);
+ return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry,
+ flush_dev_iotlb);
/* Fallback to domain selective flush if size is too big */
if ( order > cap_max_amask_val(iommu->cap) )
- return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry, flush_dev_iotlb);
+ return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry,
+ flush_dev_iotlb);
addr >>= PAGE_SHIFT_4K + order;
addr <<= PAGE_SHIFT_4K + order;
/* apply platform specific errata workarounds */
vtd_ops_preamble_quirk(iommu);
- status = flush->iotlb(iommu, did, addr, order, DMA_TLB_PSI_FLUSH,
- flush_non_present_entry, flush_dev_iotlb);
+ status = iommu->flush.iotlb(iommu, did, addr, order, DMA_TLB_PSI_FLUSH,
+ flush_non_present_entry, flush_dev_iotlb);
/* undo platform specific errata workarounds */
vtd_ops_postamble_quirk(iommu);
{
struct acpi_drhd_unit *drhd;
struct vtd_iommu *iommu;
- struct iommu_flush *flush = NULL;
int ret;
unsigned long flags;
u32 sts;
*/
if ( enable_qinval(iommu) != 0 )
{
- flush = iommu_get_flush(iommu);
- flush->context = flush_context_reg;
- flush->iotlb = flush_iotlb_reg;
+ iommu->flush.context = flush_context_reg;
+ iommu->flush.iotlb = flush_iotlb_reg;
}
}
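Taken together, the hunks above drop the iommu_get_flush() lookup (which returned a pointer into iommu->intel->flush) in favour of flush hooks embedded directly in struct vtd_iommu, and change those hooks to take a typed struct vtd_iommu * instead of void *. Below is a minimal standalone sketch of the resulting shape, assuming nothing beyond the pattern itself; the demo_* names are illustrative stand-ins, not the Xen definitions.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct vtd_iommu: the flush hooks live directly in the
 * owning structure, so callers need no lookup helper, and the hooks
 * receive a typed pointer rather than void *. */
struct demo_iommu {
    struct {
        int (*context)(struct demo_iommu *iommu, unsigned int did,
                       bool flush_non_present_entry);
    } flush;
};

/* Register-based implementation stand-in. */
static int demo_flush_context_reg(struct demo_iommu *iommu, unsigned int did,
                                  bool flush_non_present_entry)
{
    (void)iommu;
    printf("reg flush: did=%u non-present=%d\n", did, flush_non_present_entry);
    return 0;
}

int main(void)
{
    struct demo_iommu iommu = { .flush.context = demo_flush_context_reg };

    /* Call through the embedded hook, as the patched callers now do. */
    return iommu.flush.context(&iommu, 0, false);
}

In the patch itself this selection happens in two places: enable_qinval() installs the queued-invalidation hooks flush_context_qi/flush_iotlb_qi, and the hardware-init hunk above falls back to the register-based flush_context_reg/flush_iotlb_reg only when enable_qinval() fails.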
extern struct list_head acpi_rmrr_units;
extern struct list_head acpi_ioapic_units;
-struct iommu_flush {
- int __must_check (*context)(void *iommu, u16 did, u16 source_id,
- u8 function_mask, u64 type,
- bool_t non_present_entry_flush);
- int __must_check (*iotlb)(void *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type,
- bool_t flush_non_present_entry,
- bool_t flush_dev_iotlb);
-};
-
struct intel_iommu {
- struct iommu_flush flush;
struct acpi_drhd_unit *drhd;
};
spinlock_t lock; /* lock for irq remapping table */
} intremap;
+ struct {
+ int __must_check (*context)(struct vtd_iommu *iommu, u16 did,
+ u16 source_id, u8 function_mask, u64 type,
+ bool non_present_entry_flush);
+ int __must_check (*iotlb)(struct vtd_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type,
+ bool flush_non_present_entry,
+ bool flush_dev_iotlb);
+ } flush;
+
struct list_head ats_devices;
unsigned long *domid_bitmap; /* domain id bitmap */
u16 *domid_map; /* domain id mapping array */
};
-static inline struct iommu_flush *iommu_get_flush(struct vtd_iommu *iommu)
-{
- return iommu ? &iommu->intel->flush : NULL;
-}
-
#define INTEL_IOMMU_DEBUG(fmt, args...) \
do \
{ \
return queue_invalidate_iec_sync(iommu, IEC_INDEX_INVL, im, iidx);
}
-static int __must_check flush_context_qi(void *_iommu, u16 did,
+static int __must_check flush_context_qi(struct vtd_iommu *iommu, u16 did,
u16 sid, u8 fm, u64 type,
- bool_t flush_non_present_entry)
+ bool flush_non_present_entry)
{
- struct vtd_iommu *iommu = _iommu;
-
ASSERT(iommu->qinval_maddr);
/*
type >> DMA_CCMD_INVL_GRANU_OFFSET);
}
-static int __must_check flush_iotlb_qi(void *_iommu, u16 did, u64 addr,
+static int __must_check flush_iotlb_qi(struct vtd_iommu *iommu, u16 did,
+ u64 addr,
unsigned int size_order, u64 type,
- bool_t flush_non_present_entry,
- bool_t flush_dev_iotlb)
+ bool flush_non_present_entry,
+ bool flush_dev_iotlb)
{
u8 dr = 0, dw = 0;
int ret = 0, rc;
- struct vtd_iommu *iommu = _iommu;
ASSERT(iommu->qinval_maddr);
int enable_qinval(struct vtd_iommu *iommu)
{
- struct iommu_flush *flush;
u32 sts;
unsigned long flags;
if ( !ecap_queued_inval(iommu->ecap) || !iommu_qinval )
return -ENOENT;
- flush = iommu_get_flush(iommu);
-
/* Return if already enabled by Xen */
sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
if ( (sts & DMA_GSTS_QIES) && iommu->qinval_maddr )
}
}
- flush->context = flush_context_qi;
- flush->iotlb = flush_iotlb_qi;
+ iommu->flush.context = flush_context_qi;
+ iommu->flush.iotlb = flush_iotlb_qi;
spin_lock_irqsave(&iommu->register_lock, flags);