}
static union amd_iommu_pte clear_iommu_pte_present(unsigned long l1_mfn,
- unsigned long dfn)
+ unsigned long dfn,
+ unsigned int level)
{
union amd_iommu_pte *table, *pte, old;
table = map_domain_page(_mfn(l1_mfn));
- pte = &table[pfn_to_pde_idx(dfn, 1)];
+ pte = &table[pfn_to_pde_idx(dfn, level)];
old = *pte;
write_atomic(&pte->raw, 0);
unmap_domain_page(table);
return old;
}
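+/*
+ * Recursively free a no-longer-needed page table hierarchy: for a non-leaf
+ * table, queue any still-referenced lower-level tables first, then hand the
+ * page itself to iommu_queue_free_pgtable(). Freeing is deferred so the
+ * IOMMU can be flushed before the memory gets reused.
+ */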
+static void queue_free_pt(struct domain_iommu *hd, mfn_t mfn, unsigned int level)
+{
+ if ( level > 1 )
+ {
+ union amd_iommu_pte *pt = map_domain_page(mfn);
+ unsigned int i;
+
+ for ( i = 0; i < PTE_PER_TABLE_SIZE; ++i )
+ if ( pt[i].pr && pt[i].next_level )
+ {
+ ASSERT(pt[i].next_level < level);
+ queue_free_pt(hd, _mfn(pt[i].mfn), pt[i].next_level);
+ }
+
+ unmap_domain_page(pt);
+ }
+
+ iommu_queue_free_pgtable(hd, mfn_to_page(mfn));
+}
+
int cf_check amd_iommu_map_page(
struct domain *d, dfn_t dfn, mfn_t mfn, unsigned int flags,
unsigned int *flush_flags)
{
struct domain_iommu *hd = dom_iommu(d);
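+ /*
+ * Each page-table level resolves PTE_PER_TABLE_SHIFT (9) bits of the
+ * address: order 0 maps at level 1 (4k), order 9 at level 2 (2M), and
+ * order 18 at level 3 (1G).
+ */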
+ unsigned int level = (IOMMUF_order(flags) / PTE_PER_TABLE_SHIFT) + 1;
int rc;
unsigned long pt_mfn = 0;
union amd_iommu_pte old;
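+ /* Only orders matching a hardware-supported page size may be passed in. */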
+ ASSERT((hd->platform_ops->page_sizes >> IOMMUF_order(flags)) &
+ PAGE_SIZE_4K);
+
spin_lock(&hd->arch.mapping_lock);
/* The mapping request can be safely ignored when the domain is dying. */
if ( d->is_dying )
{
spin_unlock(&hd->arch.mapping_lock);
return 0;
}
rc = amd_iommu_alloc_root(d);
if ( rc )
{
spin_unlock(&hd->arch.mapping_lock);
AMD_IOMMU_ERROR("root table alloc failed, dfn = %"PRI_dfn"\n",
dfn_x(dfn));
domain_crash(d);
return rc;
}
- if ( iommu_pde_from_dfn(d, dfn_x(dfn), 1, &pt_mfn, flush_flags, true) ||
+ if ( iommu_pde_from_dfn(d, dfn_x(dfn), level, &pt_mfn, flush_flags, true) ||
!pt_mfn )
{
spin_unlock(&hd->arch.mapping_lock);
return -EFAULT;
}
- /* Install 4k mapping */
- old = set_iommu_pte_present(pt_mfn, dfn_x(dfn), mfn_x(mfn), 1,
+ /* Install mapping */
+ old = set_iommu_pte_present(pt_mfn, dfn_x(dfn), mfn_x(mfn), level,
(flags & IOMMUF_writable),
(flags & IOMMUF_readable));
spin_unlock(&hd->arch.mapping_lock);
*flush_flags |= IOMMU_FLUSHF_added;
if ( old.pr )
+ {
*flush_flags |= IOMMU_FLUSHF_modified;
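+ /*
+ * If the old entry referenced a lower-level page table which the new
+ * (super)page mapping just replaced, that subtree is no longer
+ * reachable and can be queued for freeing.
+ */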
+ if ( IOMMUF_order(flags) && old.next_level )
+ queue_free_pt(hd, _mfn(old.mfn), old.next_level);
+ }
+
return 0;
}
int cf_check amd_iommu_unmap_page(
struct domain *d, dfn_t dfn, unsigned int order,
unsigned int *flush_flags)
{
unsigned long pt_mfn = 0;
struct domain_iommu *hd = dom_iommu(d);
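+ /* Convert the unmap order to a page-table level, as on the map side. */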
+ unsigned int level = (order / PTE_PER_TABLE_SHIFT) + 1;
union amd_iommu_pte old = {};
+ /*
+ * While really we could unmap at any granularity, for now we assume unmaps
+ * are issued by common code only at the same granularity as maps.
+ */
+ ASSERT((hd->platform_ops->page_sizes >> order) & PAGE_SIZE_4K);
+
spin_lock(&hd->arch.mapping_lock);
if ( !hd->arch.amd.root_table )
{
spin_unlock(&hd->arch.mapping_lock);
return 0;
}
- if ( iommu_pde_from_dfn(d, dfn_x(dfn), 1, &pt_mfn, flush_flags, false) )
+ if ( iommu_pde_from_dfn(d, dfn_x(dfn), level, &pt_mfn, flush_flags, false) )
{
spin_unlock(&hd->arch.mapping_lock);
AMD_IOMMU_ERROR("invalid IO pagetable entry dfn = %"PRI_dfn"\n",
dfn_x(dfn));
domain_crash(d);
return -EFAULT;
}
if ( pt_mfn )
{
/* Mark PTE as 'page not present'. */
- old = clear_iommu_pte_present(pt_mfn, dfn_x(dfn));
+ old = clear_iommu_pte_present(pt_mfn, dfn_x(dfn), level);
}
spin_unlock(&hd->arch.mapping_lock);
if ( old.pr )
+ {
*flush_flags |= IOMMU_FLUSHF_modified;
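+ /*
+ * A non-zero next_level in the cleared entry means an intermediate
+ * page table, not just a leaf mapping, was cut loose; free its subtree.
+ */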
+ if ( order && old.next_level )
+ queue_free_pt(hd, _mfn(old.mfn), old.next_level);
+ }
+
return 0;
}