{
bool_t need_flush = 0;
struct domain_iommu *hd = dom_iommu(d);
+ int rc;
unsigned long pt_mfn[7];
unsigned int merge_level;
- BUG_ON( !hd->arch.root_table );
-
if ( iommu_use_hap_pt(d) )
return 0;
spin_lock(&hd->arch.mapping_lock);
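+    /* Allocate the root table on demand, in case it does not exist yet. */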
+ rc = amd_iommu_alloc_root(hd);
+ if ( rc )
+ {
+ spin_unlock(&hd->arch.mapping_lock);
+ AMD_IOMMU_DEBUG("Root table alloc failed, gfn = %lx\n", gfn);
+ domain_crash(d);
+ return rc;
+ }
+
/* Since HVM domain is initialized with 2 level IO page table,
     * we might need a deeper page table for larger gfn now */
if ( is_hvm_domain(d) )
unsigned long pt_mfn[7];
struct domain_iommu *hd = dom_iommu(d);
- BUG_ON( !hd->arch.root_table );
-
if ( iommu_use_hap_pt(d) )
return 0;
spin_lock(&hd->arch.mapping_lock);
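+    /* Without a root table nothing can have been mapped, so there is
+     * nothing to unmap either. */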
+ if ( !hd->arch.root_table )
+ {
+ spin_unlock(&hd->arch.mapping_lock);
+ return 0;
+ }
+
/* Since HVM domain is initialized with 2 level IO page table,
     * we might need a deeper page table for larger gfn now */
if ( is_hvm_domain(d) )
return scan_pci_devices();
}
-static int allocate_domain_resources(struct domain_iommu *hd)
+int amd_iommu_alloc_root(struct domain_iommu *hd)
{
- /* allocate root table */
- spin_lock(&hd->arch.mapping_lock);
- if ( !hd->arch.root_table )
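+    /* Allocate the root table lazily, on first use. */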
+ if ( unlikely(!hd->arch.root_table) )
{
hd->arch.root_table = alloc_amd_iommu_pgtable();
if ( !hd->arch.root_table )
- {
- spin_unlock(&hd->arch.mapping_lock);
return -ENOMEM;
- }
}
- spin_unlock(&hd->arch.mapping_lock);
+
return 0;
}
+static int __must_check allocate_domain_resources(struct domain_iommu *hd)
+{
+ int rc;
+
+ spin_lock(&hd->arch.mapping_lock);
+ rc = amd_iommu_alloc_root(hd);
+ spin_unlock(&hd->arch.mapping_lock);
+
+ return rc;
+}
+
static int get_paging_mode(unsigned long entries)
{
int level = 1;
{
struct domain_iommu *hd = dom_iommu(d);
- /* allocate page directroy */
- if ( allocate_domain_resources(hd) != 0 )
- {
- if ( hd->arch.root_table )
- free_domheap_page(hd->arch.root_table);
- return -ENOMEM;
- }
-
/* For pv and dom0, stick with get_paging_mode(max_page)
* For HVM dom0, use 2 level page table at first */
hd->arch.paging_mode = is_hvm_domain(d) ?
unsigned long i;
const struct amd_iommu *iommu;
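+    /* The hardware domain cannot be set up without a root table. */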
+ if ( allocate_domain_resources(dom_iommu(d)) )
+ BUG();
+
if ( !iommu_passthrough && !need_iommu(d) )
{
int rc = 0;
u8 devfn, struct pci_dev *pdev)
{
struct amd_iommu *iommu;
- int bdf;
+ int bdf, rc;
struct domain_iommu *t = dom_iommu(target);
bdf = PCI_BDF2(pdev->bus, pdev->devfn);
pdev->domain = target;
}
- /* IO page tables might be destroyed after pci-detach the last device
- * In this case, we have to re-allocate root table for next pci-attach.*/
- if ( t->arch.root_table == NULL )
- allocate_domain_resources(t);
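+    /* Make sure the target domain has a root table before the device gets
+     * set up for it. */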
+ rc = allocate_domain_resources(t);
+ if ( rc )
+ return rc;
amd_iommu_setup_domain_device(target, iommu, devfn, pdev);
AMD_IOMMU_DEBUG("Re-assign %04x:%02x:%02x.%u from dom%d to dom%d\n",
unsigned long mfn, unsigned int flags);
int __must_check amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
u64 amd_iommu_get_next_table_from_pte(u32 *entry);
+int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
u64 phys_addr, unsigned long size,
int iw, int ir);