#ifdef CONFIG_X86
p2m_type_t p2mt;
#endif
mfn_t mfn;
+#ifdef CONFIG_HAS_PASSTHROUGH
bool *dont_flush_p, dont_flush;
+#endif
int rc;
#ifdef CONFIG_X86
/*
 * Since we're likely to free the page below, we need to suspend
 * xenmem_add_to_physmap()'s suppressing of IOMMU TLB flushes.
 */
+#ifdef CONFIG_HAS_PASSTHROUGH
dont_flush_p = &this_cpu(iommu_dont_flush_iotlb);
dont_flush = *dont_flush_p;
*dont_flush_p = false;
+#endif
rc = guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);
+#ifdef CONFIG_HAS_PASSTHROUGH
*dont_flush_p = dont_flush;
+#endif
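The hunk above is a save/override/restore of the per-CPU iommu_dont_flush_iotlb flag: since the page is likely about to be freed, flushes are forced on around guest_physmap_remove_page(), and the caller's setting is put back afterwards. A minimal standalone sketch of the same pattern, using an ordinary variable in place of Xen's this_cpu() accessor and a hypothetical remove_page() in place of the real removal path:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for Xen's per-CPU iommu_dont_flush_iotlb flag. */
static bool iommu_dont_flush_iotlb;

/* Hypothetical removal path; the real one is guest_physmap_remove_page(). */
static void remove_page(void)
{
    bool *dont_flush_p = &iommu_dont_flush_iotlb;
    bool dont_flush = *dont_flush_p;    /* save the caller's setting */

    *dont_flush_p = false;              /* page may be freed: force flushing */
    printf("removing page, suppression now %d\n", *dont_flush_p);
    *dont_flush_p = dont_flush;         /* restore on the way out */
}

int main(void)
{
    iommu_dont_flush_iotlb = true;      /* caller is batching, flushes suppressed */
    remove_page();
    printf("suppression restored to %d\n", iommu_dont_flush_iotlb);
    return 0;
}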
/*
* With the lack of an IOMMU on some platforms, domains with DMA-capable
xatp->gpfn += start;
xatp->size -= start;
+#ifdef CONFIG_HAS_PASSTHROUGH
if ( is_iommu_enabled(d) )
{
this_cpu(iommu_dont_flush_iotlb) = 1;
extra.ppage = &pages[0];
}
+#endif
while ( xatp->size > done )
{
}
}
+#ifdef CONFIG_HAS_PASSTHROUGH
if ( is_iommu_enabled(d) )
{
int ret;
if ( unlikely(ret) && rc >= 0 )
rc = ret;
}
+#endif
return rc;
}
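This function is the other half of the protocol: xenmem_add_to_physmap() raises the suppression flag once, performs the whole batch of mappings in the loop, and only afterwards (in the final guarded block, whose flush calls are trimmed here) clears the flag and issues a combined IOTLB flush. A standalone sketch of that batch-then-flush-once shape; map_one(), map_batch(), and the flush counter are made up for illustration and stand in for the real mapping and flush primitives:

#include <stdbool.h>
#include <stdio.h>

static bool dont_flush;     /* stand-in for the per-CPU suppression flag */
static unsigned int flushes;

/* Hypothetical single-page map; the real code maps xatp->idx to xatp->gpfn. */
static void map_one(unsigned int gfn)
{
    (void)gfn;              /* the mapping itself is elided */
    if ( !dont_flush )
        flushes++;          /* unbatched callers flush per page */
}

static int map_batch(unsigned int start, unsigned int count, bool iommu)
{
    unsigned int done;

    if ( iommu )
        dont_flush = true;  /* suppress per-page flushes for the batch */

    for ( done = 0; done < count; done++ )
        map_one(start + done);

    if ( iommu )
    {
        dont_flush = false;
        flushes++;          /* one combined flush covering the whole batch */
    }

    return 0;
}

int main(void)
{
    map_batch(0, 16, true);
    printf("flushes with batching: %u\n", flushes);    /* prints 1, not 16 */
    return 0;
}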
return dfn_x(x) == dfn_x(y);
}
+#ifdef CONFIG_HAS_PASSTHROUGH
extern bool_t iommu_enable, iommu_enabled;
extern bool force_iommu, iommu_quarantine, iommu_verbose;
+#else
+#define iommu_enabled false
+#endif
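The #else stub is what lets common code keep testing iommu_enabled without sprinkling #ifdefs at every call site: when passthrough support is compiled out, the symbol becomes the constant false and the compiler discards the guarded branches as dead code. A toy illustration of the technique; CONFIG_HAS_PASSTHROUGH here is just a local macro toggle, and the variable definition stands in for the declaration the real IOMMU code provides:

#include <stdbool.h>
#include <stdio.h>

/* Uncomment to mimic a build with passthrough support. */
/* #define CONFIG_HAS_PASSTHROUGH 1 */

#ifdef CONFIG_HAS_PASSTHROUGH
bool iommu_enabled = true;      /* in Xen this lives in the IOMMU code */
#else
#define iommu_enabled false     /* constant stub: guarded branches compile away */
#endif

int main(void)
{
    /* No #ifdef needed at the call site in either configuration. */
    if ( iommu_enabled )
        printf("issuing IOMMU TLB flush\n");
    else
        printf("built without passthrough support\n");
    return 0;
}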
#ifdef CONFIG_X86
extern enum __packed iommu_intremap {