x86/P2M: move map_domain_gfn() (again)
author     Jan Beulich <jbeulich@suse.com>
Wed, 9 Feb 2022 11:48:59 +0000 (12:48 +0100)
committer  Jan Beulich <jbeulich@suse.com>
Wed, 9 Feb 2022 11:48:59 +0000 (12:48 +0100)
The main user is the guest walking code, so move it back there; commit
9a6787cc3809 ("x86/mm: build map_domain_gfn() just once") should perhaps
have left it there in the first place. This way it only gets built when
it is actually needed (and still only once).

This also eliminates one more CONFIG_HVM conditional from p2m.c.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
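
As a usage illustration (not part of the patch): a minimal sketch, under the
stated assumptions, of how a caller consumes map_domain_gfn()'s contract. A
non-NULL return comes with both a mapping (from map_domain_page()) and an
extra reference on mfn_to_page(*mfn); the caller has to release both. The
helper name peek_guest_table() and the plain P2M_ALLOC query are assumptions
made for the sketch, not code taken from this change.

    static bool peek_guest_table(struct p2m_domain *p2m, gfn_t table_gfn,
                                 uint32_t *pfec)
    {
        mfn_t mfn;
        void *table = map_domain_gfn(p2m, table_gfn, &mfn, P2M_ALLOC, pfec);

        if ( !table )
            return false;           /* *pfec already holds synthetic bits */

        /* ... inspect the mapped page, e.g. read page table entries ... */

        unmap_domain_page(table);   /* undo the map_domain_page() */
        put_page(mfn_to_page(mfn)); /* drop the extra reference */

        return true;
    }
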
xen/arch/x86/mm/guest_walk.c
xen/arch/x86/mm/p2m.c

diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index b9f607272c3950d12357bc31d4217709bae8f1ff..35d543ca5f96bed4d7a6c65b7ca6690af9a9cf7d 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -532,6 +532,56 @@ guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m,
     return walk_ok;
 }
 
+#if GUEST_PAGING_LEVELS == CONFIG_PAGING_LEVELS
+/*
+ * If the map is non-NULL, we leave this function having acquired an extra ref
+ * on mfn_to_page(*mfn).  In all cases, *pfec contains appropriate
+ * synthetic/structure PFEC_* bits.
+ */
+void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
+                     p2m_query_t q, uint32_t *pfec)
+{
+    p2m_type_t p2mt;
+    struct page_info *page;
+
+    if ( !gfn_valid(p2m->domain, gfn) )
+    {
+        *pfec = PFEC_reserved_bit | PFEC_page_present;
+        return NULL;
+    }
+
+    /* Translate the gfn, unsharing if shared. */
+    page = p2m_get_page_from_gfn(p2m, gfn, &p2mt, NULL, q);
+    if ( p2m_is_paging(p2mt) )
+    {
+        ASSERT(p2m_is_hostp2m(p2m));
+        if ( page )
+            put_page(page);
+        p2m_mem_paging_populate(p2m->domain, gfn);
+        *pfec = PFEC_page_paged;
+        return NULL;
+    }
+    if ( p2m_is_shared(p2mt) )
+    {
+        if ( page )
+            put_page(page);
+        *pfec = PFEC_page_shared;
+        return NULL;
+    }
+    if ( !page )
+    {
+        *pfec = 0;
+        return NULL;
+    }
+
+    *pfec = PFEC_page_present;
+    *mfn = page_to_mfn(page);
+    ASSERT(mfn_valid(*mfn));
+
+    return map_domain_page(*mfn);
+}
+#endif
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 27cb91d18a2398e9c66d838e4e040be476c64f67..c1cff377093cb1c1379d21dc5c27c02b5d3de249 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1965,58 +1965,6 @@ unsigned long paging_gva_to_gfn(struct vcpu *v,
     return hostmode->gva_to_gfn(v, hostp2m, va, pfec);
 }
 
-#endif /* CONFIG_HVM */
-
-/*
- * If the map is non-NULL, we leave this function having acquired an extra ref
- * on mfn_to_page(*mfn).  In all cases, *pfec contains appropriate
- * synthetic/structure PFEC_* bits.
- */
-void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
-                     p2m_query_t q, uint32_t *pfec)
-{
-    p2m_type_t p2mt;
-    struct page_info *page;
-
-    if ( !gfn_valid(p2m->domain, gfn) )
-    {
-        *pfec = PFEC_reserved_bit | PFEC_page_present;
-        return NULL;
-    }
-
-    /* Translate the gfn, unsharing if shared. */
-    page = p2m_get_page_from_gfn(p2m, gfn, &p2mt, NULL, q);
-    if ( p2m_is_paging(p2mt) )
-    {
-        ASSERT(p2m_is_hostp2m(p2m));
-        if ( page )
-            put_page(page);
-        p2m_mem_paging_populate(p2m->domain, gfn);
-        *pfec = PFEC_page_paged;
-        return NULL;
-    }
-    if ( p2m_is_shared(p2mt) )
-    {
-        if ( page )
-            put_page(page);
-        *pfec = PFEC_page_shared;
-        return NULL;
-    }
-    if ( !page )
-    {
-        *pfec = 0;
-        return NULL;
-    }
-
-    *pfec = PFEC_page_present;
-    *mfn = page_to_mfn(page);
-    ASSERT(mfn_valid(*mfn));
-
-    return map_domain_page(*mfn);
-}
-
-#ifdef CONFIG_HVM
-
 static unsigned int mmio_order(const struct domain *d,
                                unsigned long start_fn, unsigned long nr)
 {