if ( !(grdm->map.flags & XENMEM_RDM_ALL) && (sbdf != id) )
return 0;
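+    /* A zero-length range merely queries interest in this device: signal it, record nothing. */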
+ if ( !nr )
+ return 1;
+
if ( grdm->used_entries < grdm->map.nr_entries )
{
struct xen_reserved_device_memory rdm = {
struct ivrs_unity_map {
bool read:1;
bool write:1;
+    bool global:1;    /* Range was registered for all devices; report it only once. */
paddr_t addr;
unsigned long length;
struct ivrs_unity_map *next;
unsigned int flag);
int amd_iommu_reserve_domain_unity_unmap(struct domain *d,
const struct ivrs_unity_map *map);
+int amd_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt);
int __must_check amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
unsigned long page_count,
unsigned int flush_flags);
static int __init reserve_unity_map_for_device(
uint16_t seg, uint16_t bdf, unsigned long base,
- unsigned long length, bool iw, bool ir)
+ unsigned long length, bool iw, bool ir, bool global)
{
struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
struct ivrs_unity_map *unity_map = ivrs_mappings[bdf].unity_map;
*/
if ( base == unity_map->addr && length == unity_map->length &&
ir == unity_map->read && iw == unity_map->write )
+ {
+ if ( global )
+ unity_map->global = true;
return 0;
+ }
if ( unity_map->addr + unity_map->length > base &&
base + length > unity_map->addr )
unity_map->read = ir;
unity_map->write = iw;
+ unity_map->global = global;
unity_map->addr = base;
unity_map->length = length;
unity_map->next = ivrs_mappings[bdf].unity_map;
/* reserve r/w unity-mapped page entries for devices */
for ( bdf = rc = 0; !rc && bdf < ivrs_bdf_entries; bdf++ )
- rc = reserve_unity_map_for_device(seg, bdf, base, length, iw, ir);
+ rc = reserve_unity_map_for_device(seg, bdf, base, length, iw, ir,
+ true);
}
return rc;
paddr_t length = limit + PAGE_SIZE - base;
/* reserve unity-mapped page entries for device */
- rc = reserve_unity_map_for_device(seg, bdf, base, length, iw, ir) ?:
- reserve_unity_map_for_device(seg, req, base, length, iw, ir);
+ rc = reserve_unity_map_for_device(seg, bdf, base, length, iw, ir,
+ false) ?:
+ reserve_unity_map_for_device(seg, req, base, length, iw, ir,
+ false);
}
else
{
req = get_ivrs_mappings(iommu->seg)[bdf].dte_requestor_id;
rc = reserve_unity_map_for_device(iommu->seg, bdf, base, length,
- iw, ir) ?:
+ iw, ir, false) ?:
reserve_unity_map_for_device(iommu->seg, req, base, length,
- iw, ir);
+ iw, ir, false);
}
return rc;
return rc;
}
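+/*
+ * Report all unity-mapped and exclusion ranges known for devices behind the
+ * AMD IOMMU(s) via 'func'.  Zero-length invocations of 'func' only query
+ * whether the caller is interested in the given device at all.
+ */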
+int amd_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
+{
+ unsigned int seg = 0 /* XXX */, bdf;
+ const struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
+ /* At least for global entries, avoid reporting them multiple times. */
+ enum { pending, processing, done } global = pending;
+
+ for ( bdf = 0; bdf < ivrs_bdf_entries; ++bdf )
+ {
+ pci_sbdf_t sbdf = PCI_SBDF2(seg, bdf);
+ const struct ivrs_unity_map *um = ivrs_mappings[bdf].unity_map;
+ unsigned int req = ivrs_mappings[bdf].dte_requestor_id;
+ const struct amd_iommu *iommu = ivrs_mappings[bdf].iommu;
+ int rc;
+
+ if ( !iommu )
+ {
+ /* May need to trigger the workaround in find_iommu_for_device(). */
+ const struct pci_dev *pdev;
+
+ pcidevs_lock();
+ pdev = pci_get_pdev(seg, sbdf.bus, sbdf.devfn);
+ pcidevs_unlock();
+
+ if ( pdev )
+ iommu = find_iommu_for_device(seg, bdf);
+ if ( !iommu )
+ continue;
+ }
+
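+        /* Zero-length probe: does the caller want this device's ranges at all? */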
+ if ( func(0, 0, sbdf.sbdf, ctxt) )
+ {
+            /*
+             * When the caller processes a XENMEM_RDM_ALL request, don't
+             * report the same range(s) multiple times for what may be many
+             * devices sharing the same alias ID.
+             */
+ if ( bdf != req && ivrs_mappings[req].iommu &&
+ func(0, 0, PCI_SBDF2(seg, req).sbdf, ctxt) )
+ continue;
+
+ if ( global == pending )
+ global = processing;
+ }
+
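+        /*
+         * Report the IOMMU's exclusion range: per device if individually
+         * enabled, or just once if it covers all devices.
+         */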
+ if ( iommu->exclusion_enable &&
+ (iommu->exclusion_allow_all ?
+ global == processing :
+ ivrs_mappings[bdf].dte_allow_exclusion) )
+ {
+ rc = func(PFN_DOWN(iommu->exclusion_base),
+ PFN_UP(iommu->exclusion_limit | 1) -
+ PFN_DOWN(iommu->exclusion_base), sbdf.sbdf, ctxt);
+ if ( unlikely(rc < 0) )
+ return rc;
+ }
+
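+        /* Report this device's unity maps; globally registered ones only once. */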
+ for ( ; um; um = um->next )
+ {
+ if ( um->global && global != processing )
+ continue;
+
+ rc = func(PFN_DOWN(um->addr), PFN_DOWN(um->length),
+ sbdf.sbdf, ctxt);
+ if ( unlikely(rc < 0) )
+ return rc;
+ }
+
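+        /* Global ranges were emitted above; don't repeat them for further devices. */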
+ if ( global == processing )
+ global = done;
+ }
+
+ return 0;
+}
+
int __init amd_iommu_quarantine_init(struct domain *d)
{
struct domain_iommu *hd = dom_iommu(d);
.suspend = amd_iommu_suspend,
.resume = amd_iommu_resume,
.crash_shutdown = amd_iommu_crash_shutdown,
+ .get_reserved_device_memory = amd_iommu_get_reserved_device_memory,
.dump_page_tables = amd_dump_page_tables,
};