return hd->platform_ops->unmap_page(d, gfn);
}
+/*
+ * Invalidate the IOTLB entries covering page_count pages starting at
+ * gfn in domain d, via the vendor driver's iotlb_flush hook.  Silently
+ * does nothing when the IOMMU is disabled or the hook is not provided.
+ */
+void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    if ( iommu_enabled && hd->platform_ops && hd->platform_ops->iotlb_flush )
+        hd->platform_ops->iotlb_flush(d, gfn, page_count);
+}
+
+/*
+ * Invalidate every IOTLB entry belonging to domain d, via the vendor
+ * driver's iotlb_flush_all hook.  Silently does nothing when the IOMMU
+ * is disabled or the hook is not provided.
+ */
+void iommu_iotlb_flush_all(struct domain *d)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    if ( iommu_enabled && hd->platform_ops && hd->platform_ops->iotlb_flush_all )
+        hd->platform_ops->iotlb_flush_all(d);
+}
+
/* caller should hold the pcidevs_lock */
int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
{
}
}
+/*
+ * VT-d iotlb_flush hook: flush IOTLB entries for page_count pages
+ * starting at gfn in domain d.
+ * NOTE(review): the hard-coded 1 is presumably a "mapping present"
+ * flag of __intel_iommu_iotlb_flush -- confirm against its definition.
+ */
+static void intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count)
+{
+ __intel_iommu_iotlb_flush(d, gfn, 1, page_count);
+}
+
+/*
+ * VT-d iotlb_flush_all hook: flush the whole IOTLB for domain d.
+ * NOTE(review): (gfn=0, 0, page_count=0) appears to be the
+ * __intel_iommu_iotlb_flush convention for a domain-wide flush --
+ * verify against that helper's handling of a zero page count.
+ */
+static void intel_iommu_iotlb_flush_all(struct domain *d)
+{
+ __intel_iommu_iotlb_flush(d, 0, 0, 0);
+}
+
/* clear one page's page table */
static void dma_pte_clear_one(struct domain *domain, u64 addr)
{
.resume = vtd_resume,
.share_p2m = iommu_set_pgd,
.crash_shutdown = vtd_crash_shutdown,
+ .iotlb_flush = intel_iommu_iotlb_flush,
+ .iotlb_flush_all = intel_iommu_iotlb_flush_all,
};
/*
void (*resume)(void);
void (*share_p2m)(struct domain *d);
void (*crash_shutdown)(void);
+ void (*iotlb_flush)(struct domain *d, unsigned long gfn, unsigned int page_count);
+ void (*iotlb_flush_all)(struct domain *d);
};
void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value);
int iommu_do_domctl(struct xen_domctl *, XEN_GUEST_HANDLE(xen_domctl_t));
+/*
+ * Flush IOTLB entries for page_count pages starting at gfn, or the
+ * whole IOTLB of a domain.  Both are no-ops when the IOMMU is disabled
+ * or the vendor driver does not implement the corresponding hook.
+ */
+void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count);
+void iommu_iotlb_flush_all(struct domain *d);
+
#endif /* _IOMMU_H_ */