memory hotadd 4/7: Setup frametable for hot-added memory
authorKeir Fraser <keir.fraser@citrix.com>
Fri, 11 Dec 2009 08:56:04 +0000 (08:56 +0000)
committerKeir Fraser <keir.fraser@citrix.com>
Fri, 11 Dec 2009 08:56:04 +0000 (08:56 +0000)
We can't use alloc_boot_pages() for memory hot-add, so change the
frametable setup code to use the page range passed in.

One change that needs attention: when memory hotplug is enabled, we
have to set up the initial frametable aligned to the pdx group size
(i.e. at pdx_group_valid granularity), to make sure mfn_valid() still
works once hot-added memory pushes max_page beyond its boot-time value.
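
For reference, a minimal sketch of the pdx-group-based check that this
alignment keeps working (the in-tree definition of mfn_valid() differs
slightly; pfn_to_pdx(), PDX_GROUP_COUNT and pdx_group_valid are as used
by the code below):

    static inline int __mfn_valid(unsigned long mfn)
    {
        return likely(mfn < max_page) &&
               likely(test_bit(pfn_to_pdx(mfn) / PDX_GROUP_COUNT,
                               pdx_group_valid));
    }

Since validity is tracked per pdx group, the boot-time frametable has
to cover every group it marks valid up to a PDX_GROUP_COUNT-aligned
boundary, poisoning the tail entries beyond max_pdx.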

Signed-off-by: Jiang, Yunhong <yunhong.jiang@intel.com>
xen/arch/x86/mm.c
xen/arch/x86/setup.c
xen/arch/x86/x86_64/mm.c
xen/include/asm-x86/page.h

index f38ea4da4647fcedab0235ecd1602ec0448fa6ca..948fabb54b74390cce2802e9c15e2766fcc43a7d 100644 (file)
@@ -219,8 +219,17 @@ void __init init_frametable(void)
         init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT),
                               pdx_to_page(eidx * PDX_GROUP_COUNT));
     }
-    init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT),
-                          pdx_to_page(max_pdx - 1) + 1);
+    if ( !mem_hotplug )
+        init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT),
+                              pdx_to_page(max_pdx - 1) + 1);
+    else
+    {
+        init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT),
+                              pdx_to_page(max_idx * PDX_GROUP_COUNT));
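+        /* Mark pages between max_pdx and the group-aligned end as invalid. */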
+        memset(pdx_to_page(max_pdx), -1, (unsigned long)pdx_to_page(max_idx) -
+               (unsigned long)pdx_to_page(max_pdx));
+    }
 }
 
 void __init arch_init_memory(void)
index d1745ba94d641615765e3e8266446b09d86692d0..0832bd667bb984be803499c65ca6b672e98a35b0 100644 (file)
@@ -304,7 +304,7 @@ static void __init setup_max_pdx(void)
 #endif
 }
 
-static void __init set_pdx_range(unsigned long smfn, unsigned long emfn)
+void set_pdx_range(unsigned long smfn, unsigned long emfn)
 {
     unsigned long idx, eidx;
 
index 46f4d0771acf75fdef03af27bda84bfc6f88a9e2..89ba4cbd8dbf09063c9fb373792ef394005353d5 100644 (file)
@@ -801,6 +801,129 @@ int __cpuinit setup_compat_arg_xlat(unsigned int cpu, int node)
     return 0;
 }
 
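+/*
+ * Tear down the frametable mappings covering a hot-added range (e.g. after
+ * a failed hot-add).  Only 2M mappings whose backing pages came from the
+ * hot-added range itself are destroyed.
+ */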
+void cleanup_frame_table(struct mem_hotadd_info *info)
+{
+    unsigned long sva, eva;
+    l3_pgentry_t l3e;
+    l2_pgentry_t l2e;
+    unsigned long spfn, epfn;
+
+    spfn = info->spfn;
+    epfn = info->epfn;
+
+    sva = (unsigned long)pdx_to_page(pfn_to_pdx(spfn));
+    eva = (unsigned long)pdx_to_page(pfn_to_pdx(epfn));
+
+    /* Mark all page_info structs in the range as invalid. */
+    memset(mfn_to_page(spfn), -1,
+           (unsigned long)mfn_to_page(epfn) -
+           (unsigned long)mfn_to_page(spfn));
+
+    while ( sva < eva )
+    {
+        l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(sva)])[
+          l3_table_offset(sva)];
+        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ||
+             (l3e_get_flags(l3e) & _PAGE_PSE) )
+        {
+            sva = (sva & ~((1UL << L3_PAGETABLE_SHIFT) - 1)) +
+                    (1UL << L3_PAGETABLE_SHIFT);
+            continue;
+        }
+
+        l2e = l3e_to_l2e(l3e)[l2_table_offset(sva)];
+        ASSERT(l2e_get_flags(l2e) & _PAGE_PRESENT);
+
+        if ( (l2e_get_flags(l2e) & (_PAGE_PRESENT | _PAGE_PSE)) ==
+              (_PAGE_PSE | _PAGE_PRESENT) )
+        {
+            if ( hotadd_mem_valid(l2e_get_pfn(l2e), info) )
+                destroy_xen_mappings(sva & ~((1UL << L2_PAGETABLE_SHIFT) - 1),
+                        ((sva & ~((1UL << L2_PAGETABLE_SHIFT) - 1)) +
+                         (1UL << L2_PAGETABLE_SHIFT) - 1));
+
+            sva = (sva & ~((1UL << L2_PAGETABLE_SHIFT) - 1)) +
+                  (1UL << L2_PAGETABLE_SHIFT);
+            continue;
+        }
+
+        ASSERT(l1e_get_flags(l2e_to_l1e(l2e)[l1_table_offset(sva)]) &
+                _PAGE_PRESENT);
+        sva = (sva & ~((1UL << PAGE_SHIFT) - 1)) +
+              (1UL << PAGE_SHIFT);
+    }
+
+    /* Brute-force flush of all TLBs. */
+    flush_tlb_all();
+}
+
+/* Should we be paranoid about failure in map_pages_to_xen()? */
+static int setup_frametable_chunk(void *start, void *end,
+                                  struct mem_hotadd_info *info)
+{
+    unsigned long s = (unsigned long)start;
+    unsigned long e = (unsigned long)end;
+    unsigned long mfn;
+
+    ASSERT(!(s & ((1UL << L2_PAGETABLE_SHIFT) - 1)));
+    ASSERT(!(e & ((1UL << L2_PAGETABLE_SHIFT) - 1)));
+
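+    /* Map each 2M frametable chunk to memory taken from the hot-added range. */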
+    for ( ; s < e; s += (1UL << L2_PAGETABLE_SHIFT))
+    {
+        mfn = alloc_hotadd_mfn(info);
+        map_pages_to_xen(s, mfn, 1UL << PAGETABLE_ORDER, PAGE_HYPERVISOR);
+    }
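+    /* Poison the new frametable entries until they are properly initialised. */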
+    memset(start, -1, s - (unsigned long)start);
+
+    return 0;
+}
+
+int extend_frame_table(struct mem_hotadd_info *info)
+{
+    unsigned long cidx, nidx, eidx, spfn, epfn;
+
+    spfn = info->spfn;
+    epfn = info->epfn;
+
+    eidx = (pfn_to_pdx(epfn) + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT;
+    nidx = cidx = pfn_to_pdx(spfn) / PDX_GROUP_COUNT;
+
+    ASSERT( pfn_to_pdx(epfn) <= (DIRECTMAP_SIZE >> PAGE_SHIFT) &&
+            (pfn_to_pdx(epfn) <= FRAMETABLE_SIZE / sizeof(struct page_info)) );
+
+    if ( test_bit(cidx, pdx_group_valid) )
+        cidx = find_next_zero_bit(pdx_group_valid, eidx, cidx);
+
+    if ( cidx >= eidx )
+        return 0;
+
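+    /* Cover each invalid run of pdx groups in [cidx, eidx). */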
+    while ( cidx < eidx )
+    {
+        nidx = find_next_bit(pdx_group_valid, eidx, cidx);
+        if ( nidx >= eidx )
+            nidx = eidx;
+        setup_frametable_chunk(pdx_to_page(cidx * PDX_GROUP_COUNT),
+                               pdx_to_page(nidx * PDX_GROUP_COUNT),
+                               info);
+
+        cidx = find_next_zero_bit(pdx_group_valid, eidx, nidx);
+    }
+
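+    /* Zero the page_info structs for the newly added range. */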
+    memset(mfn_to_page(spfn), 0,
+           (unsigned long)mfn_to_page(epfn) -
+           (unsigned long)mfn_to_page(spfn));
+    return 0;
+}
+
 void __init subarch_init_memory(void)
 {
     unsigned long i, n, v, m2p_start_mfn;
index e5c18061be3344a0a20a4acfafe91eb174e92dee..1997863719d9305416d2da480e6ee58f0bbab19a 100644 (file)
@@ -360,6 +360,8 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned long v);
 l3_pgentry_t *virt_to_xen_l3e(unsigned long v);
 #endif
 
+extern void set_pdx_range(unsigned long smfn, unsigned long emfn);
+
 /* Map machine page range in Xen virtual address space. */
 #define MAP_SMALL_PAGES _PAGE_AVAIL0 /* don't use superpages for the mapping */
 int map_pages_to_xen(