unsigned int used_entries;
};
-static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr,
- u32 id, void *ctxt)
+static int cf_check get_reserved_device_memory(
+ xen_pfn_t start, xen_ulong_t nr, u32 id, void *ctxt)
{
struct get_reserved_device_memory *grdm = ctxt;
uint32_t sbdf = PCI_SBDF3(grdm->map.dev.pci.seg, grdm->map.dev.pci.bus,
int amd_iommu_update_ivrs_mapping_acpi(void);
int cf_check iov_adjust_irq_affinities(void);
-int amd_iommu_quarantine_init(struct domain *d);
+int cf_check amd_iommu_quarantine_init(struct domain *d);
/* mapping functions */
-int __must_check amd_iommu_map_page(struct domain *d, dfn_t dfn,
- mfn_t mfn, unsigned int flags,
- unsigned int *flush_flags);
-int __must_check amd_iommu_unmap_page(struct domain *d, dfn_t dfn,
- unsigned int *flush_flags);
+int __must_check cf_check amd_iommu_map_page(
+ struct domain *d, dfn_t dfn, mfn_t mfn, unsigned int flags,
+ unsigned int *flush_flags);
+int __must_check cf_check amd_iommu_unmap_page(
+ struct domain *d, dfn_t dfn, unsigned int *flush_flags);
int __must_check amd_iommu_alloc_root(struct domain *d);
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
const struct ivrs_unity_map *map,
unsigned int flag);
int amd_iommu_reserve_domain_unity_unmap(struct domain *d,
const struct ivrs_unity_map *map);
-int amd_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt);
-int __must_check amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
- unsigned long page_count,
- unsigned int flush_flags);
-int __must_check amd_iommu_flush_iotlb_all(struct domain *d);
+int cf_check amd_iommu_get_reserved_device_memory(
+ iommu_grdm_t *func, void *ctxt);
+int __must_check cf_check amd_iommu_flush_iotlb_pages(
+ struct domain *d, dfn_t dfn, unsigned long page_count,
+ unsigned int flush_flags);
+int __must_check cf_check amd_iommu_flush_iotlb_all(struct domain *d);
/* device table functions */
int get_dma_requestor_id(uint16_t seg, uint16_t bdf);
struct amd_iommu *find_iommu_for_device(int seg, int bdf);
/* interrupt remapping */
-bool iov_supports_xt(void);
+bool cf_check iov_supports_xt(void);
int amd_iommu_setup_ioapic_remapping(void);
void *amd_iommu_alloc_intremap_table(
const struct amd_iommu *, unsigned long **, unsigned int nr);
-int amd_iommu_free_intremap_table(
+int cf_check amd_iommu_free_intremap_table(
const struct amd_iommu *, struct ivrs_mappings *, uint16_t);
unsigned int amd_iommu_intremap_table_order(
const void *irt, const struct amd_iommu *iommu);
-void amd_iommu_ioapic_update_ire(
+void cf_check amd_iommu_ioapic_update_ire(
unsigned int apic, unsigned int reg, unsigned int value);
-unsigned int amd_iommu_read_ioapic_from_ire(
+unsigned int cf_check amd_iommu_read_ioapic_from_ire(
unsigned int apic, unsigned int reg);
-int amd_iommu_msi_msg_update_ire(
+int cf_check amd_iommu_msi_msg_update_ire(
struct msi_desc *msi_desc, struct msi_msg *msg);
-int amd_setup_hpet_msi(struct msi_desc *msi_desc);
+int cf_check amd_setup_hpet_msi(struct msi_desc *msi_desc);
void cf_check amd_iommu_dump_intremap_tables(unsigned char key);
extern struct ioapic_sbdf {
extern unsigned long *shared_intremap_inuse;
/* power management support */
-void amd_iommu_resume(void);
-int __must_check amd_iommu_suspend(void);
-void amd_iommu_crash_shutdown(void);
+void cf_check amd_iommu_resume(void);
+int __must_check cf_check amd_iommu_suspend(void);
+void cf_check amd_iommu_crash_shutdown(void);
/* guest iommu support */
#ifdef CONFIG_HVM
writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_LOW_OFFSET);
}
-static void set_iommu_event_log_control(struct amd_iommu *iommu,
- bool enable)
+static void cf_check set_iommu_event_log_control(
+ struct amd_iommu *iommu, bool enable)
{
/* Reset head and tail pointer manually before enablement */
if ( enable )
writeq(iommu->ctrl.raw, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
}
-static void set_iommu_ppr_log_control(struct amd_iommu *iommu,
- bool enable)
+static void cf_check set_iommu_ppr_log_control(
+ struct amd_iommu *iommu, bool enable)
{
/* Reset head and tail pointer manually before enablement */
if ( enable )
.set_affinity = set_x2apic_affinity,
};
-static void parse_event_log_entry(struct amd_iommu *iommu, u32 entry[])
+static void cf_check parse_event_log_entry(struct amd_iommu *iommu, u32 entry[])
{
u32 code;
static const char *const event_str[] = {
spin_unlock_irqrestore(&iommu->lock, flags);
}
-void parse_ppr_log_entry(struct amd_iommu *iommu, u32 entry[])
+static void cf_check parse_ppr_log_entry(struct amd_iommu *iommu, u32 entry[])
{
u16 device_id;
return 0;
}
-static int __init amd_iommu_setup_device_table(
+static int __init cf_check amd_iommu_setup_device_table(
u16 seg, struct ivrs_mappings *ivrs_mappings)
{
struct amd_iommu_dte *dt = IVRS_MAPPINGS_DEVTAB(ivrs_mappings);
amd_iommu_flush_all_pages(d);
}
-static int _invalidate_all_devices(
+static int cf_check _invalidate_all_devices(
u16 seg, struct ivrs_mappings *ivrs_mappings)
{
unsigned int bdf;
iterate_ivrs_mappings(_invalidate_all_devices);
}
-int amd_iommu_suspend(void)
+int cf_check amd_iommu_suspend(void)
{
amd_iommu_crash_shutdown();
return 0;
}
-void amd_iommu_crash_shutdown(void)
+void cf_check amd_iommu_crash_shutdown(void)
{
struct amd_iommu *iommu;
disable_iommu(iommu);
}
-void amd_iommu_resume(void)
+void cf_check amd_iommu_resume(void)
{
struct amd_iommu *iommu;
return 0;
}
-void amd_iommu_ioapic_update_ire(
+void cf_check amd_iommu_ioapic_update_ire(
unsigned int apic, unsigned int reg, unsigned int value)
{
struct IO_APIC_route_entry old_rte = { 0 };
}
}
-unsigned int amd_iommu_read_ioapic_from_ire(
+unsigned int cf_check amd_iommu_read_ioapic_from_ire(
unsigned int apic, unsigned int reg)
{
unsigned int idx;
return ERR_PTR(-EINVAL);
}
-int amd_iommu_msi_msg_update_ire(
+int cf_check amd_iommu_msi_msg_update_ire(
struct msi_desc *msi_desc, struct msi_msg *msg)
{
struct pci_dev *pdev = msi_desc->dev;
return rc;
}
-int amd_iommu_free_intremap_table(
+int cf_check amd_iommu_free_intremap_table(
const struct amd_iommu *iommu, struct ivrs_mappings *ivrs_mapping,
uint16_t bdf)
{
return tb;
}
-bool __init iov_supports_xt(void)
+bool __init cf_check iov_supports_xt(void)
{
unsigned int apic;
return true;
}
-int __init amd_setup_hpet_msi(struct msi_desc *msi_desc)
+int __init cf_check amd_setup_hpet_msi(struct msi_desc *msi_desc)
{
const struct amd_iommu *iommu;
spinlock_t *lock;
}
}
-static int dump_intremap_mapping(const struct amd_iommu *iommu,
- struct ivrs_mappings *ivrs_mapping,
- uint16_t unused)
+static int cf_check dump_intremap_mapping(
+ const struct amd_iommu *iommu, struct ivrs_mappings *ivrs_mapping,
+ uint16_t unused)
{
unsigned long flags;
return 0;
}
-int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
- unsigned int flags, unsigned int *flush_flags)
+int cf_check amd_iommu_map_page(
+ struct domain *d, dfn_t dfn, mfn_t mfn, unsigned int flags,
+ unsigned int *flush_flags)
{
struct domain_iommu *hd = dom_iommu(d);
int rc;
return 0;
}
-int amd_iommu_unmap_page(struct domain *d, dfn_t dfn,
- unsigned int *flush_flags)
+int cf_check amd_iommu_unmap_page(
+ struct domain *d, dfn_t dfn, unsigned int *flush_flags)
{
unsigned long pt_mfn = 0;
struct domain_iommu *hd = dom_iommu(d);
return end - start;
}
-int amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
- unsigned long page_count,
- unsigned int flush_flags)
+int cf_check amd_iommu_flush_iotlb_pages(
+ struct domain *d, dfn_t dfn, unsigned long page_count,
+ unsigned int flush_flags)
{
unsigned long dfn_l = dfn_x(dfn);
return 0;
}
-int amd_iommu_flush_iotlb_all(struct domain *d)
+int cf_check amd_iommu_flush_iotlb_all(struct domain *d)
{
amd_iommu_flush_all_pages(d);
return rc;
}
-int amd_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
+int cf_check amd_iommu_get_reserved_device_memory(
+ iommu_grdm_t *func, void *ctxt)
{
unsigned int seg = 0 /* XXX */, bdf;
const struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
return 0;
}
-int __init amd_iommu_quarantine_init(struct domain *d)
+int __init cf_check amd_iommu_quarantine_init(struct domain *d)
{
struct domain_iommu *hd = dom_iommu(d);
unsigned long end_gfn =
return 0;
}
-static int __init iov_detect(void)
+static int __init cf_check iov_detect(void)
{
if ( !iommu_enable && !iommu_intremap )
return 0;
return 0;
}
-static int iov_enable_xt(void)
+static int cf_check iov_enable_xt(void)
{
int rc;
unsigned int __read_mostly amd_iommu_max_paging_mode = 6;
int __read_mostly amd_iommu_min_paging_mode = 1;
-static int amd_iommu_domain_init(struct domain *d)
+static int cf_check amd_iommu_domain_init(struct domain *d)
{
struct domain_iommu *hd = dom_iommu(d);
return 0;
}
-static int amd_iommu_add_device(u8 devfn, struct pci_dev *pdev);
+static int cf_check amd_iommu_add_device(u8 devfn, struct pci_dev *pdev);
-static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
+static void __hwdom_init cf_check amd_iommu_hwdom_init(struct domain *d)
{
const struct amd_iommu *iommu;
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static int reassign_device(struct domain *source, struct domain *target,
- u8 devfn, struct pci_dev *pdev)
+static int cf_check reassign_device(
+ struct domain *source, struct domain *target, u8 devfn,
+ struct pci_dev *pdev)
{
struct amd_iommu *iommu;
int bdf, rc;
return 0;
}
-static int amd_iommu_assign_device(struct domain *d, u8 devfn,
- struct pci_dev *pdev,
- u32 flag)
+static int cf_check amd_iommu_assign_device(
+ struct domain *d, u8 devfn, struct pci_dev *pdev, u32 flag)
{
struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(pdev->seg);
int bdf = PCI_BDF2(pdev->bus, devfn);
return rc;
}
-static void amd_iommu_clear_root_pgtable(struct domain *d)
+static void cf_check amd_iommu_clear_root_pgtable(struct domain *d)
{
struct domain_iommu *hd = dom_iommu(d);
spin_unlock(&hd->arch.mapping_lock);
}
-static void amd_iommu_domain_destroy(struct domain *d)
+static void cf_check amd_iommu_domain_destroy(struct domain *d)
{
iommu_identity_map_teardown(d);
ASSERT(!dom_iommu(d)->arch.amd.root_table);
}
-static int amd_iommu_add_device(u8 devfn, struct pci_dev *pdev)
+static int cf_check amd_iommu_add_device(u8 devfn, struct pci_dev *pdev)
{
struct amd_iommu *iommu;
u16 bdf;
return amd_iommu_setup_domain_device(pdev->domain, iommu, devfn, pdev);
}
-static int amd_iommu_remove_device(u8 devfn, struct pci_dev *pdev)
+static int cf_check amd_iommu_remove_device(u8 devfn, struct pci_dev *pdev)
{
struct amd_iommu *iommu;
u16 bdf;
return 0;
}
-static int amd_iommu_group_id(u16 seg, u8 bus, u8 devfn)
+static int cf_check amd_iommu_group_id(u16 seg, u8 bus, u8 devfn)
{
int bdf = PCI_BDF2(bus, devfn);
unmap_domain_page(table_vaddr);
}
-static void amd_dump_page_tables(struct domain *d)
+static void cf_check amd_dump_page_tables(struct domain *d)
{
const struct domain_iommu *hd = dom_iommu(d);
* scan pci devices to add all existed PCI devices to alldevs_list,
* and setup pci hierarchy in array bus2bridge.
*/
-static int __init _scan_pci_devices(struct pci_seg *pseg, void *arg)
+static int __init cf_check _scan_pci_devices(struct pci_seg *pseg, void *arg)
{
struct pci_dev *pdev;
int bus, dev, func;
ctxt->d->domain_id, err);
}
-static int __hwdom_init _setup_hwdom_pci_devices(struct pci_seg *pseg, void *arg)
+static int __hwdom_init cf_check _setup_hwdom_pci_devices(
+ struct pci_seg *pseg, void *arg)
{
struct setup_hwdom *ctxt = arg;
int bus, devfn;
}
#endif
-static int _dump_pci_devices(struct pci_seg *pseg, void *arg)
+static int cf_check _dump_pci_devices(struct pci_seg *pseg, void *arg)
{
struct pci_dev *pdev;
return cpu_has_x2apic && ((dmar_flags & mask) == ACPI_DMAR_INTR_REMAP);
}
-int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
+int cf_check intel_iommu_get_reserved_device_memory(
+ iommu_grdm_t *func, void *ctxt)
{
struct acpi_rmrr_unit *rmrr, *rmrr_cur = NULL;
unsigned int i;
void print_vtd_entries(struct vtd_iommu *iommu, int bus, int devfn, u64 gmfn);
keyhandler_fn_t cf_check vtd_dump_iommu_info;
-bool intel_iommu_supports_eim(void);
-int intel_iommu_enable_eim(void);
-void intel_iommu_disable_eim(void);
+bool cf_check intel_iommu_supports_eim(void);
+int cf_check intel_iommu_enable_eim(void);
+void cf_check intel_iommu_disable_eim(void);
int enable_qinval(struct vtd_iommu *iommu);
void disable_qinval(struct vtd_iommu *iommu);
int iommu_flush_iec_index(struct vtd_iommu *iommu, u8 im, u16 iidx);
void clear_fault_bits(struct vtd_iommu *iommu);
-int __must_check vtd_flush_context_reg(struct vtd_iommu *iommu, uint16_t did,
- uint16_t source_id,
- uint8_t function_mask, uint64_t type,
- bool flush_non_present_entry);
-int __must_check vtd_flush_iotlb_reg(struct vtd_iommu *iommu, uint16_t did,
- uint64_t addr, unsigned int size_order,
- uint64_t type,
- bool flush_non_present_entry,
- bool flush_dev_iotlb);
+int __must_check cf_check vtd_flush_context_reg(
+ struct vtd_iommu *iommu, uint16_t did, uint16_t source_id,
+ uint8_t function_mask, uint64_t type, bool flush_non_present_entry);
+int __must_check cf_check vtd_flush_iotlb_reg(
+ struct vtd_iommu *iommu, uint16_t did, uint64_t addr,
+ unsigned int size_order, uint64_t type, bool flush_non_present_entry,
+ bool flush_dev_iotlb);
struct vtd_iommu *ioapic_to_iommu(unsigned int apic_id);
struct vtd_iommu *hpet_to_iommu(unsigned int hpet_id);
u8 bus, u8 devfn, const struct pci_dev *);
int domain_context_unmap_one(struct domain *domain, struct vtd_iommu *iommu,
u8 bus, u8 devfn);
-int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt);
+int cf_check intel_iommu_get_reserved_device_memory(
+ iommu_grdm_t *func, void *ctxt);
-unsigned int io_apic_read_remap_rte(unsigned int apic, unsigned int reg);
-void io_apic_write_remap_rte(unsigned int apic,
- unsigned int reg, unsigned int value);
+unsigned int cf_check io_apic_read_remap_rte(
+ unsigned int apic, unsigned int reg);
+void cf_check io_apic_write_remap_rte(
+ unsigned int apic, unsigned int reg, unsigned int value);
struct msi_desc;
struct msi_msg;
-int msi_msg_write_remap_rte(struct msi_desc *, struct msi_msg *);
+int cf_check msi_msg_write_remap_rte(struct msi_desc *, struct msi_msg *);
-int intel_setup_hpet_msi(struct msi_desc *);
+int cf_check intel_setup_hpet_msi(struct msi_desc *);
int is_igd_vt_enabled_quirk(void);
bool is_azalia_tlb_enabled(const struct acpi_drhd_unit *);
set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, hpetid_to_bdf(id));
}
-bool __init intel_iommu_supports_eim(void)
+bool __init cf_check intel_iommu_supports_eim(void)
{
struct acpi_drhd_unit *drhd;
unsigned int apic;
return 0;
}
-unsigned int io_apic_read_remap_rte(
+unsigned int cf_check io_apic_read_remap_rte(
unsigned int apic, unsigned int reg)
{
unsigned int ioapic_pin = (reg - 0x10) / 2;
return (*(((u32 *)&old_rte) + 0));
}
-void io_apic_write_remap_rte(
+void cf_check io_apic_write_remap_rte(
unsigned int apic, unsigned int reg, unsigned int value)
{
unsigned int ioapic_pin = (reg - 0x10) / 2;
return 0;
}
-int msi_msg_write_remap_rte(
+int cf_check msi_msg_write_remap_rte(
struct msi_desc *msi_desc, struct msi_msg *msg)
{
struct pci_dev *pdev = msi_desc->dev;
: -EINVAL;
}
-int __init intel_setup_hpet_msi(struct msi_desc *msi_desc)
+int __init cf_check intel_setup_hpet_msi(struct msi_desc *msi_desc)
{
struct vtd_iommu *iommu = hpet_to_iommu(msi_desc->hpet_id);
unsigned long flags;
* This function is used to enable Interrupt remapping when
* enable x2apic
*/
-int intel_iommu_enable_eim(void)
+int cf_check intel_iommu_enable_eim(void)
{
struct acpi_drhd_unit *drhd;
struct vtd_iommu *iommu;
* This function is used to disable Interrupt remapping when
* suspend local apic
*/
-void intel_iommu_disable_eim(void)
+void cf_check intel_iommu_disable_eim(void)
{
struct acpi_drhd_unit *drhd;
static struct iommu_ops vtd_ops;
static struct tasklet vtd_fault_tasklet;
-static int setup_hwdom_device(u8 devfn, struct pci_dev *);
+static int cf_check setup_hwdom_device(u8 devfn, struct pci_dev *);
static void setup_hwdom_rmrr(struct domain *d);
static bool domid_mapping(const struct vtd_iommu *iommu)
}
/* return value determine if we need a write buffer flush */
-int vtd_flush_context_reg(struct vtd_iommu *iommu, uint16_t did,
- uint16_t source_id, uint8_t function_mask,
- uint64_t type, bool flush_non_present_entry)
+int cf_check vtd_flush_context_reg(
+ struct vtd_iommu *iommu, uint16_t did, uint16_t source_id,
+ uint8_t function_mask, uint64_t type, bool flush_non_present_entry)
{
unsigned long flags;
}
/* return value determine if we need a write buffer flush */
-int vtd_flush_iotlb_reg(struct vtd_iommu *iommu, uint16_t did, uint64_t addr,
- unsigned int size_order, uint64_t type,
- bool flush_non_present_entry, bool flush_dev_iotlb)
+int cf_check vtd_flush_iotlb_reg(
+ struct vtd_iommu *iommu, uint16_t did, uint64_t addr,
+ unsigned int size_order, uint64_t type, bool flush_non_present_entry,
+ bool flush_dev_iotlb)
{
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
uint64_t val = type | DMA_TLB_IVT;
return ret;
}
-static int __must_check iommu_flush_iotlb_pages(struct domain *d,
- dfn_t dfn,
- unsigned long page_count,
- unsigned int flush_flags)
+static int __must_check cf_check iommu_flush_iotlb_pages(
+ struct domain *d, dfn_t dfn, unsigned long page_count,
+ unsigned int flush_flags)
{
ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
ASSERT(flush_flags);
page_count);
}
-static int __must_check iommu_flush_iotlb_all(struct domain *d)
+static int __must_check cf_check iommu_flush_iotlb_all(struct domain *d)
{
return iommu_flush_iotlb(d, INVALID_DFN, 0, 0);
}
agaw = 64; \
agaw; })
-static int intel_iommu_domain_init(struct domain *d)
+static int cf_check intel_iommu_domain_init(struct domain *d)
{
struct domain_iommu *hd = dom_iommu(d);
return 0;
}
-static void __hwdom_init intel_iommu_hwdom_init(struct domain *d)
+static void __hwdom_init cf_check intel_iommu_hwdom_init(struct domain *d)
{
struct acpi_drhd_unit *drhd;
return ret;
}
-static void iommu_clear_root_pgtable(struct domain *d)
+static void cf_check iommu_clear_root_pgtable(struct domain *d)
{
struct domain_iommu *hd = dom_iommu(d);
spin_unlock(&hd->arch.mapping_lock);
}
-static void iommu_domain_teardown(struct domain *d)
+static void cf_check iommu_domain_teardown(struct domain *d)
{
struct domain_iommu *hd = dom_iommu(d);
const struct acpi_drhd_unit *drhd;
XFREE(hd->arch.vtd.iommu_bitmap);
}
-static int __must_check intel_iommu_map_page(struct domain *d, dfn_t dfn,
- mfn_t mfn, unsigned int flags,
- unsigned int *flush_flags)
+static int __must_check cf_check intel_iommu_map_page(
+ struct domain *d, dfn_t dfn, mfn_t mfn, unsigned int flags,
+ unsigned int *flush_flags)
{
struct domain_iommu *hd = dom_iommu(d);
struct dma_pte *page, *pte, old, new = {};
return rc;
}
-static int __must_check intel_iommu_unmap_page(struct domain *d, dfn_t dfn,
- unsigned int *flush_flags)
+static int __must_check cf_check intel_iommu_unmap_page(
+ struct domain *d, dfn_t dfn, unsigned int *flush_flags)
{
/* Do nothing if VT-d shares EPT page table */
if ( iommu_use_hap_pt(d) )
return 0;
}
-static int intel_iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
- unsigned int *flags)
+static int cf_check intel_iommu_lookup_page(
+ struct domain *d, dfn_t dfn, mfn_t *mfn, unsigned int *flags)
{
struct domain_iommu *hd = dom_iommu(d);
struct dma_pte *page, val;
(ept_has_1gb(ept_cap) && opt_hap_1gb) <= cap_sps_1gb(vtd_cap);
}
-static int intel_iommu_add_device(u8 devfn, struct pci_dev *pdev)
+static int cf_check intel_iommu_add_device(u8 devfn, struct pci_dev *pdev)
{
struct acpi_rmrr_unit *rmrr;
u16 bdf;
return 0;
}
-static int intel_iommu_enable_device(struct pci_dev *pdev)
+static int cf_check intel_iommu_enable_device(struct pci_dev *pdev)
{
struct acpi_drhd_unit *drhd = acpi_find_matched_drhd_unit(pdev);
int ret = drhd ? ats_device(pdev, drhd) : -ENODEV;
return ret >= 0 ? 0 : ret;
}
-static int intel_iommu_remove_device(u8 devfn, struct pci_dev *pdev)
+static int cf_check intel_iommu_remove_device(u8 devfn, struct pci_dev *pdev)
{
struct acpi_rmrr_unit *rmrr;
u16 bdf;
return domain_context_unmap(pdev->domain, devfn, pdev);
}
-static int __hwdom_init setup_hwdom_device(u8 devfn, struct pci_dev *pdev)
+static int __hwdom_init cf_check setup_hwdom_device(
+ u8 devfn, struct pci_dev *pdev)
{
return domain_context_mapping(pdev->domain, devfn, pdev);
}
uint32_t fectl;
} *__read_mostly iommu_state;
-static int __init vtd_setup(void)
+static int __init cf_check vtd_setup(void)
{
struct acpi_drhd_unit *drhd;
struct vtd_iommu *iommu;
return ret;
}
-static int reassign_device_ownership(
+static int cf_check reassign_device_ownership(
struct domain *source,
struct domain *target,
u8 devfn, struct pci_dev *pdev)
return ret;
}
-static int intel_iommu_assign_device(
+static int cf_check intel_iommu_assign_device(
struct domain *d, u8 devfn, struct pci_dev *pdev, u32 flag)
{
struct domain *s = pdev->domain;
return ret;
}
-static int intel_iommu_group_id(u16 seg, u8 bus, u8 devfn)
+static int cf_check intel_iommu_group_id(u16 seg, u8 bus, u8 devfn)
{
u8 secbus;
return PCI_BDF2(bus, devfn);
}
-static int __must_check vtd_suspend(void)
+static int __must_check cf_check vtd_suspend(void)
{
struct acpi_drhd_unit *drhd;
struct vtd_iommu *iommu;
return 0;
}
-static void vtd_crash_shutdown(void)
+static void cf_check vtd_crash_shutdown(void)
{
struct acpi_drhd_unit *drhd;
struct vtd_iommu *iommu;
}
}
-static void vtd_resume(void)
+static void cf_check vtd_resume(void)
{
struct acpi_drhd_unit *drhd;
struct vtd_iommu *iommu;
unmap_vtd_domain_page(pt_vaddr);
}
-static void vtd_dump_page_tables(struct domain *d)
+static void cf_check vtd_dump_page_tables(struct domain *d)
{
const struct domain_iommu *hd = dom_iommu(d);
agaw_to_level(hd->arch.vtd.agaw), 0, 0);
}
-static int __init intel_iommu_quarantine_init(struct domain *d)
+static int __init cf_check intel_iommu_quarantine_init(struct domain *d)
{
struct domain_iommu *hd = dom_iommu(d);
struct page_info *pg;
return queue_invalidate_iec_sync(iommu, IEC_INDEX_INVL, im, iidx);
}
-static int __must_check flush_context_qi(struct vtd_iommu *iommu, u16 did,
- u16 sid, u8 fm, u64 type,
- bool flush_non_present_entry)
+static int __must_check cf_check flush_context_qi(
+ struct vtd_iommu *iommu, u16 did, u16 sid, u8 fm, u64 type,
+ bool flush_non_present_entry)
{
ASSERT(iommu->qinval_maddr);
type >> DMA_CCMD_INVL_GRANU_OFFSET);
}
-static int __must_check flush_iotlb_qi(struct vtd_iommu *iommu, u16 did,
- u64 addr,
- unsigned int size_order, u64 type,
- bool flush_non_present_entry,
- bool flush_dev_iotlb)
+static int __must_check cf_check flush_iotlb_qi(
+ struct vtd_iommu *iommu, u16 did, u64 addr, unsigned int size_order,
+ u64 type, bool flush_non_present_entry, bool flush_dev_iotlb)
{
u8 dr = 0, dw = 0;
int ret = 0, rc;
return 0;
}
-static int vtd_flush_context_noop(struct vtd_iommu *iommu, uint16_t did,
- uint16_t source_id, uint8_t function_mask,
- uint64_t type, bool flush_non_present_entry)
+static int cf_check vtd_flush_context_noop(
+ struct vtd_iommu *iommu, uint16_t did, uint16_t source_id,
+ uint8_t function_mask, uint64_t type, bool flush_non_present_entry)
{
WARN();
return -EIO;
}
-static int vtd_flush_iotlb_noop(struct vtd_iommu *iommu, uint16_t did,
- uint64_t addr, unsigned int size_order,
- uint64_t type, bool flush_non_present_entry,
- bool flush_dev_iotlb)
+static int cf_check vtd_flush_iotlb_noop(
+ struct vtd_iommu *iommu, uint16_t did, uint64_t addr,
+ unsigned int size_order, uint64_t type, bool flush_non_present_entry,
+ bool flush_dev_iotlb)
{
WARN();
return -EIO;
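/*
 * Illustrative sketch, not part of the patch above.  Every hunk in this
 * series follows the same pattern: a function whose address is stored in a
 * function-pointer table (struct iommu_ops hooks, iterate_ivrs_mappings()
 * handlers, iommu_grdm_t walkers, flush callbacks) gains the cf_check
 * annotation on both its declaration and its definition, so that indirect
 * calls through those tables have a valid control-flow-check landing point
 * (the apparent purpose of the annotation).  The identifiers below
 * (demo_domain, demo_hook_t, demo_ops, demo_flush_all) are hypothetical
 * stand-ins, not Xen symbols, and the fallback cf_check definition is an
 * assumption kept only so the sketch compiles on its own.
 */
#ifndef cf_check
# define cf_check /* real builds map this to the compiler's attribute */
#endif

struct demo_domain;

/* Hook type: any implementation assigned below must carry cf_check. */
typedef int (demo_hook_t)(struct demo_domain *d);

/* Reached only through demo_ops.flush_all, hence the annotation. */
static int cf_check demo_flush_all(struct demo_domain *d)
{
    (void)d;
    return 0;
}

static const struct demo_ops {
    demo_hook_t *flush_all;
} demo_ops = {
    .flush_all = demo_flush_all,    /* indirect-call target */
};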