return order;
}
-int iommu_map(struct domain *d, dfn_t dfn0, mfn_t mfn0,
- unsigned long page_count, unsigned int flags,
- unsigned int *flush_flags)
+long iommu_map(struct domain *d, dfn_t dfn0, mfn_t mfn0,
+ unsigned long page_count, unsigned int flags,
+ unsigned int *flush_flags)
{
const struct domain_iommu *hd = dom_iommu(d);
unsigned long i;
- unsigned int order;
+ unsigned int order, j = 0;
int rc = 0;
if ( !is_iommu_enabled(d) )
order = mapping_order(hd, dfn, mfn, page_count - i);
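+ /*
+ * When preemption was requested, check for it roughly every 4096
+ * iterations, and in any event stop before the count of pages
+ * processed so far could no longer be returned as a positive long.
+ */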
+ if ( (flags & IOMMUF_preempt) &&
+ ((!(++j & 0xfff) && general_preempt_check()) ||
+ i > LONG_MAX - (1UL << order)) )
+ return i;
+
rc = iommu_call(hd->platform_ops, map_page, d, dfn, mfn,
flags | IOMMUF_order(order), flush_flags);
d->domain_id, dfn_x(dfn), mfn_x(mfn), rc);
/* while statement to satisfy __must_check */
- while ( iommu_unmap(d, dfn0, i, flush_flags) )
+ while ( iommu_unmap(d, dfn0, i, 0, flush_flags) )
break;
if ( !is_hardware_domain(d) )
unsigned long page_count, unsigned int flags)
{
unsigned int flush_flags = 0;
- int rc = iommu_map(d, dfn, mfn, page_count, flags, &flush_flags);
+ int rc;
+
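+ /*
+ * The legacy wrapper has no way of reporting partial completion, so
+ * preemptible requests must not come through here.
+ */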
+ ASSERT(!(flags & IOMMUF_preempt));
+ rc = iommu_map(d, dfn, mfn, page_count, flags, &flush_flags);
if ( !this_cpu(iommu_dont_flush_iotlb) && !rc )
rc = iommu_iotlb_flush(d, dfn, page_count, flush_flags);
return rc;
}
-int iommu_unmap(struct domain *d, dfn_t dfn0, unsigned long page_count,
- unsigned int *flush_flags)
+long iommu_unmap(struct domain *d, dfn_t dfn0, unsigned long page_count,
+ unsigned int flags, unsigned int *flush_flags)
{
const struct domain_iommu *hd = dom_iommu(d);
unsigned long i;
- unsigned int order;
+ unsigned int order, j = 0;
int rc = 0;
if ( !is_iommu_enabled(d) )
return 0;
+ ASSERT(!(flags & ~IOMMUF_preempt));
+
for ( i = 0; i < page_count; i += 1UL << order )
{
dfn_t dfn = dfn_add(dfn0, i);
int err;
order = mapping_order(hd, dfn, _mfn(0), page_count - i);
+
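+ /* Preemption / overflow check mirroring the one in iommu_map(). */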
+ if ( (flags & IOMMUF_preempt) &&
+ ((!(++j & 0xfff) && general_preempt_check()) ||
+ i > LONG_MAX - (1UL << order)) )
+ return i;
+
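+ /* Pass the caller's flags, IOMMUF_preempt included, on to the hook. */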
err = iommu_call(hd->platform_ops, unmap_page, d, dfn,
- order, flush_flags);
+ flags | IOMMUF_order(order), flush_flags);
if ( likely(!err) )
continue;
int iommu_legacy_unmap(struct domain *d, dfn_t dfn, unsigned long page_count)
{
unsigned int flush_flags = 0;
- int rc = iommu_unmap(d, dfn, page_count, &flush_flags);
+ int rc = iommu_unmap(d, dfn, page_count, 0, &flush_flags);
if ( !this_cpu(iommu_dont_flush_iotlb) && !rc )
rc = iommu_iotlb_flush(d, dfn, page_count, flush_flags);
void arch_iommu_hwdom_init(struct domain *d);
/*
- * The following flags are passed to map operations and passed by lookup
- * operations.
+ * The following flags are passed to map (applicable ones also to unmap)
+ * operations, while some are passed back by lookup operations.
*/
#define IOMMUF_order(n) ((n) & 0x3f)
#define _IOMMUF_readable 6
#define IOMMUF_readable (1u<<_IOMMUF_readable)
#define _IOMMUF_writable 7
#define IOMMUF_writable (1u<<_IOMMUF_writable)
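+/* Allow a map/unmap operation to be preempted (partial completion). */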
+#define IOMMUF_preempt (1u << 8)
/*
* flush_flags:
#define IOMMU_FLUSHF_modified (1u << _IOMMU_FLUSHF_modified)
#define IOMMU_FLUSHF_all (1u << _IOMMU_FLUSHF_all)
-int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
- unsigned long page_count, unsigned int flags,
- unsigned int *flush_flags);
-int __must_check iommu_unmap(struct domain *d, dfn_t dfn,
- unsigned long page_count,
- unsigned int *flush_flags);
+/*
+ * For both of these: Negative return values are error indicators. Zero
+ * indicates full successful completion of the request, while positive
+ * values indicate partial completion, which can occur only when
+ * IOMMUF_preempt was passed in.
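+ *
+ * A caller seeing a positive return value can complete the request by
+ * retrying (e.g. from a hypercall continuation) with dfn (and mfn)
+ * advanced and page_count reduced by that value.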
+ */
+long __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+ unsigned long page_count, unsigned int flags,
+ unsigned int *flush_flags);
+long __must_check iommu_unmap(struct domain *d, dfn_t dfn,
+ unsigned long page_count, unsigned int flags,
+ unsigned int *flush_flags);
int __must_check iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
unsigned long page_count,