x86/mm: clean up SHARED_M2P{,_ENTRY} uses
author    Jan Beulich <jbeulich@suse.com>
          Tue, 13 Feb 2018 16:28:36 +0000 (17:28 +0100)
committer Jan Beulich <jbeulich@suse.com>
          Tue, 13 Feb 2018 16:28:36 +0000 (17:28 +0100)
Stop open-coding SHARED_M2P() and drop a pointless use of it from
paging_mfn_is_dirty() (!VALID_M2P() is a superset of SHARED_M2P()) and
another one from free_page_type() (prior assertions render this
redundant).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tim Deegan <tim@xen.org>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
xen/arch/x86/mm.c
xen/arch/x86/mm/mem_sharing.c
xen/arch/x86/mm/p2m-pt.c
xen/arch/x86/mm/p2m.c
xen/arch/x86/mm/paging.c

index 35f204369bade4c9358136bcea38253ed4ab70fe..86942c726533989618c4fbb8b00dafe23611a10e 100644 (file)
@@ -2390,9 +2390,7 @@ int free_page_type(struct page_info *page, unsigned long type,
         ASSERT(!shadow_mode_refcounts(owner));
 
         gmfn = mfn_to_gmfn(owner, mfn_x(page_to_mfn(page)));
-        ASSERT(VALID_M2P(gmfn));
-        /* Page sharing not supported for shadowed domains */
-        if(!SHARED_M2P(gmfn))
+        if ( VALID_M2P(gmfn) )
             shadow_remove_all_shadows(owner, _mfn(gmfn));
     }
 
@@ -4217,7 +4215,7 @@ int xenmem_add_to_physmap_one(
 
     /* Unmap from old location, if any. */
     old_gpfn = get_gpfn_from_mfn(mfn_x(mfn));
-    ASSERT( old_gpfn != SHARED_M2P_ENTRY );
+    ASSERT(!SHARED_M2P(old_gpfn));
     if ( space == XENMAPSPACE_gmfn && old_gpfn != gfn )
     {
         rc = -EXDEV;
index 6f4be95515f6dd57275935164a57989d590dae25..57f54c55c812eabc0a1732fc9a7945113f296560 100644 (file)
@@ -409,7 +409,7 @@ static struct page_info* mem_sharing_lookup(unsigned long mfn)
             unsigned long t = read_atomic(&page->u.inuse.type_info);
             ASSERT((t & PGT_type_mask) == PGT_shared_page);
             ASSERT((t & PGT_count_mask) >= 2);
-            ASSERT(get_gpfn_from_mfn(mfn) == SHARED_M2P_ENTRY); 
+            ASSERT(SHARED_M2P(get_gpfn_from_mfn(mfn)));
             return page;
         }
     }
@@ -469,7 +469,7 @@ static int audit(void)
         }
 
         /* Check the m2p entry */
-        if ( get_gpfn_from_mfn(mfn_x(mfn)) != SHARED_M2P_ENTRY )
+        if ( !SHARED_M2P(get_gpfn_from_mfn(mfn_x(mfn))) )
         {
            MEM_SHARING_DEBUG("mfn %lx shared, but wrong m2p entry (%lx)!\n",
                              mfn_x(mfn), get_gpfn_from_mfn(mfn_x(mfn)));
index ad6f9ef10dae2e2bf635ed582ff43aab93e02a97..753124bdcd7fc0d636824029f0aab5b9d5ea05e0 100644 (file)
@@ -1059,8 +1059,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
                         {
                             m2pfn = get_gpfn_from_mfn(mfn+i1);
                             /* Allow shared M2Ps */
-                            if ( (m2pfn != (gfn + i1)) &&
-                                 (m2pfn != SHARED_M2P_ENTRY) )
+                            if ( (m2pfn != (gfn + i1)) && !SHARED_M2P(m2pfn) )
                             {
                                 pmbad++;
                                 P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
index dccd1425b40eef3ec4ddb1a4975c4f98bd84c8ee..48e50fb5d82a819ca2c529609eaea8d17f9ff73b 100644 (file)
@@ -2597,7 +2597,7 @@ void audit_p2m(struct domain *d,
             continue;
         }
 
-        if ( gfn == SHARED_M2P_ENTRY )
+        if ( SHARED_M2P(gfn) )
         {
             P2M_PRINTK("shared mfn (%lx) on domain page list!\n",
                     mfn);
index f93ae4b80c73f661f0a82c5944932af5fbda8cfe..8a658b91187b2543318199c7d7f50842f3c6f0f1 100644 (file)
@@ -369,8 +369,8 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
 
     /* We /really/ mean PFN here, even for non-translated guests. */
     pfn = _pfn(get_gpfn_from_mfn(mfn_x(gmfn)));
-    /* Shared pages are always read-only; invalid pages can't be dirty. */
-    if ( unlikely(SHARED_M2P(pfn_x(pfn)) || !VALID_M2P(pfn_x(pfn))) )
+    /* Invalid pages can't be dirty. */
+    if ( unlikely(!VALID_M2P(pfn_x(pfn))) )
         return 0;
 
     mfn = d->arch.paging.log_dirty.top;