From a5b0eb363694e7e15405f0b3fc5fb6fab79df1db Mon Sep 17 00:00:00 2001
From: Paul Durrant
Date: Mon, 17 Dec 2018 09:22:59 +0000
Subject: [PATCH] x86/mm/p2m: stop checking for IOMMU shared page tables in
 mmio_order()

Now that the iommu_map() and iommu_unmap() operations take an order
parameter and elide flushing, there's no strong reason why modifying
MMIO ranges in the p2m should be restricted to a 4k granularity simply
because the IOMMU is enabled but shared page tables are not in
operation.

Signed-off-by: Paul Durrant
Reviewed-by: Jan Beulich
---
 xen/arch/x86/mm/p2m.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 1b3f2ff048..5451f16eff 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2210,13 +2210,12 @@ static unsigned int mmio_order(const struct domain *d,
                                unsigned long start_fn, unsigned long nr)
 {
     /*
-     * Note that the !iommu_use_hap_pt() here has three effects:
-     * - cover iommu_{,un}map_page() not having an "order" input yet,
+     * Note that the !hap_enabled() here has two effects:
      * - exclude shadow mode (which doesn't support large MMIO mappings),
      * - exclude PV guests, should execution reach this code for such.
      * So be careful when altering this.
      */
-    if ( !iommu_use_hap_pt(d) ||
+    if ( !hap_enabled(d) ||
          (start_fn & ((1UL << PAGE_ORDER_2M) - 1)) || !(nr >> PAGE_ORDER_2M) )
         return PAGE_ORDER_4K;
 
-- 
2.30.2
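
Note: to illustrate what this change permits, below is a minimal,
self-contained sketch (not the Xen code itself) of the order-selection
logic in mmio_order() as it stands after the patch. With HAP enabled, a
2M-aligned start frame, and a range covering at least one full
superpage, the function can now return PAGE_ORDER_2M rather than always
falling back to PAGE_ORDER_4K. In this sketch hap_enabled() is stubbed,
the struct domain argument is dropped, and the real function's further
handling of larger orders is omitted.

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_ORDER_4K 0
    #define PAGE_ORDER_2M 9   /* 2^9 4k frames == one 2M superpage */

    /* Stub: assume a HAP domain; the real check takes a struct domain. */
    static bool hap_enabled(void) { return true; }

    static unsigned int mmio_order(unsigned long start_fn, unsigned long nr)
    {
        /*
         * Fall back to 4k mappings when HAP is off (shadow mode, PV),
         * when the start frame is not 2M-aligned, or when the range is
         * smaller than one 2M superpage.
         */
        if ( !hap_enabled() ||
             (start_fn & ((1UL << PAGE_ORDER_2M) - 1)) ||
             !(nr >> PAGE_ORDER_2M) )
            return PAGE_ORDER_4K;

        return PAGE_ORDER_2M;
    }

    int main(void)
    {
        printf("%u\n", mmio_order(0x200, 512)); /* aligned, full superpage -> 9 */
        printf("%u\n", mmio_order(0x201, 512)); /* unaligned start         -> 0 */
        printf("%u\n", mmio_order(0x400, 511)); /* under one superpage     -> 0 */
        return 0;
    }

Before the patch, the first case would also have yielded PAGE_ORDER_4K
on a HAP domain whose IOMMU keeps separate page tables, because
!iommu_use_hap_pt(d) was true there and, as the removed comment notes,
the iommu_{,un}map_page() calls of the time had no "order" input.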