From: Jan Beulich
Date: Mon, 25 Jul 2022 13:41:48 +0000 (+0200)
Subject: VT-d: replace all-contiguous page tables by superpage mappings
X-Git-Tag: archive/raspbian/4.17.0-1+rpi1^2~33^2~392
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=3eb5c23542fabeeaff3c20a1fb1f6dddc2874890;p=xen.git

VT-d: replace all-contiguous page tables by superpage mappings

When a page table ends up with all contiguous entries (including all
identical attributes), it can be replaced by a superpage entry at the
next higher level. The page table itself can then be scheduled for
freeing.

The adjustment to LEVEL_MASK is merely to avoid leaving a latent trap
for whenever we (and obviously hardware) start supporting 512G
mappings.

Note that cache sync-ing is likely more strict than necessary. This is
both to be on the safe side as well as to maintain the pattern of all
updates of (potentially) live tables being accompanied by a flush (if
so needed).

Signed-off-by: Jan Beulich
Reviewed-by: Kevin Tian
Reviewed-by: Roger Pau Monné
Reviewed-by: Paul Durrant
---

diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 745c7bae0d..35545bfaef 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -2211,14 +2211,35 @@ static int __must_check cf_check intel_iommu_map_page(
      * While the (ab)use of PTE_kind_table here allows to save some work in
      * the function, the main motivation for it is that it avoids a so far
      * unexplained hang during boot (while preparing Dom0) on a Westmere
-     * based laptop.
+     * based laptop. This also has the intended effect of terminating the
+     * loop when super pages aren't supported anymore at the next level.
      */
-    pt_update_contig_markers(&page->val,
-                             address_level_offset(dfn_to_daddr(dfn), level),
-                             level,
-                             (hd->platform_ops->page_sizes &
-                              (1UL << level_to_offset_bits(level + 1))
-                              ? PTE_kind_leaf : PTE_kind_table));
+    while ( pt_update_contig_markers(&page->val,
+                                     address_level_offset(dfn_to_daddr(dfn), level),
+                                     level,
+                                     (hd->platform_ops->page_sizes &
+                                      (1UL << level_to_offset_bits(level + 1))
+                                      ? PTE_kind_leaf : PTE_kind_table)) )
+    {
+        struct page_info *pg = maddr_to_page(pg_maddr);
+
+        unmap_vtd_domain_page(page);
+
+        new.val &= ~(LEVEL_MASK << level_to_offset_bits(level));
+        dma_set_pte_superpage(new);
+
+        pg_maddr = addr_to_dma_page_maddr(d, dfn_to_daddr(dfn), ++level,
+                                          flush_flags, false);
+        BUG_ON(pg_maddr < PAGE_SIZE);
+
+        page = map_vtd_domain_page(pg_maddr);
+        pte = &page[address_level_offset(dfn_to_daddr(dfn), level)];
+        *pte = new;
+        iommu_sync_cache(pte, sizeof(*pte));
+
+        *flush_flags |= IOMMU_FLUSHF_modified | IOMMU_FLUSHF_all;
+        iommu_queue_free_pgtable(hd, pg);
+    }
 
     spin_unlock(&hd->arch.mapping_lock);
     unmap_vtd_domain_page(page);
diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
index 7c3fe40150..78aa8a96f5 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -232,7 +232,7 @@ struct context_entry {
 
 /* page table handling */
 #define LEVEL_STRIDE       (9)
-#define LEVEL_MASK         ((1 << LEVEL_STRIDE) - 1)
+#define LEVEL_MASK         (PTE_NUM - 1UL)
 #define PTE_NUM            (1 << LEVEL_STRIDE)
 #define level_to_agaw(val) ((val) - 2)
 #define agaw_to_level(val) ((val) + 2)
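
A note on the mechanics, not part of the commit itself: pt_update_contig_markers()
returns true once, after the current write, every entry of the table at hand is a
contiguous leaf with identical attributes, at which point the loop body replaces
the parent slot with a superpage entry, queues the now-redundant table for
freeing, and retries one level up. Below is a minimal standalone sketch of that
redundancy test and replacement for the level-1 (4k -> 2M) case; all names
(table_is_all_contiguous, ADDR_MASK, PTE_SUPERPAGE) and the flat PTE layout are
invented for illustration and are not the Xen code.

/* Standalone illustration only; simplified PTE layout, not Xen code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PTE_NUM        512           /* entries per table (9-bit stride) */
#define PAGE_SHIFT     12
#define ADDR_MASK      (~0xfffULL)   /* bits 12 and up hold the frame address */
#define PTE_SUPERPAGE  (1ULL << 7)   /* marks a leaf at levels above 1 */

typedef uint64_t pte_t;

/*
 * A level-1 table is redundant iff every slot has the same attributes as
 * slot 0, the mapped addresses advance by exactly one page per slot, and
 * the base is aligned to the 2M region the table covers.
 */
static bool table_is_all_contiguous(const pte_t *tbl)
{
    uint64_t attrs = tbl[0] & ~ADDR_MASK;
    uint64_t base = tbl[0] & ADDR_MASK;
    unsigned int i;

    if (base & (((uint64_t)PTE_NUM << PAGE_SHIFT) - 1))
        return false;                /* not 2M-aligned: no superpage possible */

    for (i = 0; i < PTE_NUM; ++i)
        if ((tbl[i] & ~ADDR_MASK) != attrs ||
            (tbl[i] & ADDR_MASK) != base + ((uint64_t)i << PAGE_SHIFT))
            return false;

    return true;
}

int main(void)
{
    pte_t *tbl = calloc(PTE_NUM, sizeof(*tbl));
    pte_t parent_slot = 0;
    unsigned int i;

    /* Map 512 consecutive 4k pages starting at 2M, all read+write (bits 0-1). */
    for (i = 0; i < PTE_NUM; ++i)
        tbl[i] = (0x200000ULL + ((uint64_t)i << PAGE_SHIFT)) | 3;

    if (table_is_all_contiguous(tbl)) {
        /* Point the parent directly at the 2M region and drop the table. */
        parent_slot = tbl[0] | PTE_SUPERPAGE;
        free(tbl);                   /* Xen instead queues the page for freeing */
    }

    printf("parent slot now: %#llx\n", (unsigned long long)parent_slot);
    return 0;
}

In the real loop the same test repeats at the next level (subject to the
hardware's supported page sizes), so 4k mappings can collapse into a 2M
superpage and, if the neighbouring 2M entries are contiguous as well, into a
1G one.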
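
As to the LEVEL_MASK change: the loop body computes
LEVEL_MASK << level_to_offset_bits(level), and installing a 512G superpage
means clearing the index bits of a level-3 slot, i.e. a shift by 30 whose
result needs bit 38. With the previous int-typed definition that shift would
overflow a 32-bit int; typing the mask unsigned long widens the whole
expression. A standalone illustration, assuming an LP64 target as on x86-64
(again not Xen code, though the macro values match the header):

/* Standalone illustration only, assuming 64-bit unsigned long (LP64). */
#include <stdio.h>

#define LEVEL_STRIDE            9
#define PTE_NUM                 (1 << LEVEL_STRIDE)
#define LEVEL_MASK              (PTE_NUM - 1UL)   /* unsigned long, as in the patch */

/* 12 page-offset bits plus 9 index bits per paging level below the given one. */
#define level_to_offset_bits(l) (12 + LEVEL_STRIDE * ((l) - 1))

int main(void)
{
    /*
     * level_to_offset_bits(3) == 30, so the shifted mask occupies bits
     * 30..38. With the old definition ((1 << LEVEL_STRIDE) - 1) the
     * operand is a 32-bit int and the shift would overflow; the 1UL in
     * the new definition keeps the arithmetic in 64 bits.
     */
    unsigned long mask = LEVEL_MASK << level_to_offset_bits(3);

    printf("level-3 index bits: %#lx\n", mask);   /* 0x7fc0000000 */
    return 0;
}

Today's hardware tops out at 1G mappings (a shift by 21, which still fits an
int), which is why the commit message calls the old definition a latent trap
rather than an active bug.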