That original patch caused alloc_ivrs_mappings() to be called too
early. Move the call back to where it was, converting the single
invocation into a loop over all IOMMUs.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
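
As a standalone illustration of the shape the code takes after this
change (a minimal sketch, not Xen code: every name, the table size and
the duplicate-segment check below are made up for the model), the
per-segment tables are allocated by walking the list of already
detected IOMMUs, so detection itself no longer allocates anything and
two IOMMUs on the same PCI segment end up sharing one table:

/* model_ivrs.c -- illustrative model only, not Xen code */
#include <stdio.h>
#include <stdlib.h>

#define MODEL_MAX_SEGMENTS 4            /* arbitrary bound for the model */

struct model_iommu {
    unsigned int seg;                   /* PCI segment this IOMMU sits on */
    struct model_iommu *next;
};

static struct model_iommu *iommu_list;              /* filled by "detection" */
static void *segment_mappings[MODEL_MAX_SEGMENTS];  /* one table per segment */

/* Allocate the per-segment table; a no-op if the segment already has one. */
static int model_alloc_ivrs_mappings(unsigned int seg)
{
    if (seg >= MODEL_MAX_SEGMENTS)
        return -1;
    if (segment_mappings[seg])
        return 0;
    segment_mappings[seg] = calloc(1, 4096);
    return segment_mappings[seg] ? 0 : -1;
}

int main(void)
{
    /* "Detect" three IOMMUs: two on segment 0, one on segment 1. */
    static const unsigned int segs[] = { 0, 0, 1 };

    for (size_t i = 0; i < sizeof(segs) / sizeof(segs[0]); i++) {
        struct model_iommu *iommu = calloc(1, sizeof(*iommu));

        if (!iommu)
            return EXIT_FAILURE;
        iommu->seg = segs[i];
        iommu->next = iommu_list;
        iommu_list = iommu;
    }

    /* The restored pattern: one loop over all IOMMUs, after detection. */
    for (const struct model_iommu *iommu = iommu_list; iommu; iommu = iommu->next)
        if (model_alloc_ivrs_mappings(iommu->seg) != 0)
            return EXIT_FAILURE;

    for (unsigned int seg = 0; seg < MODEL_MAX_SEGMENTS; seg++)
        printf("segment %u: table %sallocated\n",
               seg, segment_mappings[seg] ? "" : "not ");

    return 0;
}

Built with e.g. gcc -std=c99, the model reports tables for segments 0
and 1 only; whether the real alloc_ivrs_mappings() needs such a
duplicate-segment check is not visible in the hunks below.
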
     spin_lock_init(&iommu->lock);
     iommu->seg = ivhd_block->pci_segment;
-    if (alloc_ivrs_mappings(ivhd_block->pci_segment)) {
-        xfree(iommu);
-        return -ENOMEM;
-    }
     iommu->bdf = ivhd_block->header.dev_id;
     iommu->cap_offset = ivhd_block->cap_offset;
     iommu->mmio_base_phys = ivhd_block->mmio_base;
     return rc;
 }
-int __init alloc_ivrs_mappings(u16 seg)
+static int __init alloc_ivrs_mappings(u16 seg)
 {
     struct ivrs_mappings *ivrs_mappings;
     int bdf;
         goto error_out;
     radix_tree_init(&ivrs_maps);
-    if ( alloc_ivrs_mappings(0) != 0 )
-        goto error_out;
+    for_each_amd_iommu ( iommu )
+        if ( alloc_ivrs_mappings(iommu->seg) != 0 )
+            goto error_out;
     if ( amd_iommu_update_ivrs_mapping_acpi() != 0 )
         goto error_out;
 extern unsigned short ivrs_bdf_entries;
-int alloc_ivrs_mappings(u16 seg);
 struct ivrs_mappings *get_ivrs_mappings(u16 seg);
 int iterate_ivrs_mappings(int (*)(u16 seg, struct ivrs_mappings *));
 int iterate_ivrs_entries(int (*)(u16 seg, struct ivrs_mappings *));