x86/pod: prevent infinite loop when shattering large pages
author      Julien Grall <julien.grall@linaro.org>
            Tue, 28 Nov 2017 12:11:55 +0000 (13:11 +0100)
committer   Jan Beulich <jbeulich@suse.com>
            Tue, 28 Nov 2017 12:11:55 +0000 (13:11 +0100)
When populating pages, the PoD code may need to split large pages using
p2m_set_entry and request the caller to retry (see ept_get_entry for an
example of such a caller).

p2m_set_entry may fail to shatter a large page if it is not possible to
allocate memory for the new page table. However, the error is not
propagated, so callers keep retrying the PoD operation forever.

Prevent the infinite loop by returning false when it is not possible to
shatter the large mapping.

This is XSA-246.

Signed-off-by: Julien Grall <julien.grall@linaro.org>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
xen/arch/x86/mm/p2m-pod.c

index 0a811ccf284a06ec98e5641ebd8d6d735ecf20d5..7ba56b14ab96a215d94a70f079b2727875ba3c2e 100644
@@ -1113,9 +1113,8 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn,
          * NOTE: In a fine-grained p2m locking scenario this operation
          * may need to promote its locking from gfn->1g superpage
          */
-        p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
-                      p2m_populate_on_demand, p2m->default_access);
-        return true;
+        return !p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
+                              p2m_populate_on_demand, p2m->default_access);
     }
 
     /* Only reclaim if we're in actual need of more cache. */
@@ -1147,8 +1146,12 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn,
 
     BUG_ON((mfn_x(mfn) & ((1UL << order) - 1)) != 0);
 
-    p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
-                  p2m->default_access);
+    if ( p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
+                       p2m->default_access) )
+    {
+        p2m_pod_cache_add(p2m, p, order);
+        goto out_fail;
+    }
 
     for( i = 0; i < (1UL << order); i++ )
     {
@@ -1193,14 +1196,17 @@ remap_and_retry:
     BUG_ON(order != PAGE_ORDER_2M);
     pod_unlock(p2m);
 
-    /* Remap this 2-meg region in singleton chunks */
     /*
+     * Remap this 2-meg region in singleton chunks. See the comment on the
+     * 1G page splitting path above for why a single call suffices.
+     *
      * NOTE: In a p2m fine-grained lock scenario this might
      * need promoting the gfn lock from gfn->2M superpage.
      */
-    for ( i = 0; i < (1UL << order); i++ )
-        p2m_set_entry(p2m, gfn_add(gfn_aligned, i), INVALID_MFN, PAGE_ORDER_4K,
-                      p2m_populate_on_demand, p2m->default_access);
+    if ( p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_4K,
+                       p2m_populate_on_demand, p2m->default_access) )
+        return false;
+
     if ( tb_init_done )
     {
         struct {