x86: Disable set_gpfn_from_mfn until m2p table is allocated.
author     Keir Fraser <keir@xen.org>
           Fri, 10 Jun 2011 07:18:33 +0000 (08:18 +0100)
committer  Keir Fraser <keir@xen.org>
           Fri, 10 Jun 2011 07:18:33 +0000 (08:18 +0100)
This is a prerequisite for calling set_gpfn_from_mfn() unconditionally
from free_heap_pages().
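
For illustration only (this sketch is not part of the patch, and its body is
a simplified assumption rather than the real allocator code), the kind of
unconditional caller this enables looks roughly like:

    static void free_heap_pages(struct page_info *pg, unsigned int order)
    {
        unsigned long i;

        /* Safe to call even before paging_init(): the M2P write is
         * silently dropped while machine_to_phys_mapping_valid is 0. */
        for ( i = 0; i < (1UL << order); i++ )
            set_gpfn_from_mfn(page_to_mfn(pg + i), INVALID_M2P_ENTRY);

        /* ... usual heap bookkeeping ... */
    }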

Signed-off-by: Keir Fraser <keir@xen.org>
xen/arch/x86/x86_64/mm.c
xen/include/asm-x86/mm.h

diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 3d7a8bcba2719e0bd5016ca67b93260e1714f3f9..effd52a4c4419622d88b1752890ab031ad84e44d 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -47,6 +47,8 @@ unsigned int __read_mostly pfn_pdx_hole_shift = 0;
 
 unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
 
+bool_t __read_mostly machine_to_phys_mapping_valid = 0;
+
 /* Top-level master (and idle-domain) page directory. */
 l4_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
     idle_pg_table[L4_PAGETABLE_ENTRIES];
@@ -800,6 +802,8 @@ void __init paging_init(void)
 #undef CNT
 #undef MFN
 
+    machine_to_phys_mapping_valid = 1;
+
     /* Set up linear page table mapping. */
     l4e_write(&idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)],
               l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR));
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index bc88a91ffb6d562862e889cf4ef3ab6cdc005819..40dbbf61e473b3ab8b3625883b2a2de7bf9f74f4 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -470,7 +470,7 @@ TYPE_SAFE(unsigned long,mfn);
 
 #ifdef CONFIG_COMPAT
 #define compat_machine_to_phys_mapping ((unsigned int *)RDWR_COMPAT_MPT_VIRT_START)
-#define set_gpfn_from_mfn(mfn, pfn) ({                         \
+#define _set_gpfn_from_mfn(mfn, pfn) ({                        \
     struct domain *d = page_get_owner(__mfn_to_page(mfn));     \
     unsigned long entry = (d && (d == dom_cow)) ?              \
         SHARED_M2P_ENTRY : (pfn);                              \
@@ -479,7 +479,7 @@ TYPE_SAFE(unsigned long,mfn);
      machine_to_phys_mapping[(mfn)] = (entry));                \
     })
 #else
-#define set_gpfn_from_mfn(mfn, pfn) ({                         \
+#define _set_gpfn_from_mfn(mfn, pfn) ({                        \
     struct domain *d = page_get_owner(__mfn_to_page(mfn));     \
     if(d && (d == dom_cow))                                    \
         machine_to_phys_mapping[(mfn)] = SHARED_M2P_ENTRY;     \
@@ -487,6 +487,17 @@ TYPE_SAFE(unsigned long,mfn);
         machine_to_phys_mapping[(mfn)] = (pfn);                \
     })
 #endif
+
+/*
+ * Disable some users of set_gpfn_from_mfn() (e.g., free_heap_pages()) until
+ * the machine_to_phys_mapping is actually set up.
+ */
+extern bool_t machine_to_phys_mapping_valid;
+#define set_gpfn_from_mfn(mfn, pfn) do {        \
+    if ( machine_to_phys_mapping_valid )        \
+        _set_gpfn_from_mfn(mfn, pfn);           \
+} while (0)
+
 #define get_gpfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
 
 #define mfn_to_gmfn(_d, mfn)                            \
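
As a standalone illustration (plain C, not Xen code, and not part of this
patch), the guard behaves as sketched below: writes through the wrapper are
dropped until the validity flag is raised, mirroring what paging_init() now
does once the M2P table has been allocated.

    #include <stdio.h>

    typedef int bool_t;

    /* Stand-ins for Xen's M2P table and the new validity flag. */
    static unsigned long machine_to_phys_mapping[16];
    static bool_t machine_to_phys_mapping_valid = 0;

    #define _set_gpfn_from_mfn(mfn, pfn) \
        (machine_to_phys_mapping[(mfn)] = (pfn))

    /* Same shape as the wrapper added by this patch. */
    #define set_gpfn_from_mfn(mfn, pfn) do {        \
        if ( machine_to_phys_mapping_valid )        \
            _set_gpfn_from_mfn(mfn, pfn);           \
    } while (0)

    int main(void)
    {
        set_gpfn_from_mfn(3, 42);  /* dropped: M2P not yet valid */
        printf("before: %lu\n", machine_to_phys_mapping[3]);  /* prints 0 */

        machine_to_phys_mapping_valid = 1;  /* as done at the end of paging_init() */

        set_gpfn_from_mfn(3, 42);  /* now takes effect */
        printf("after:  %lu\n", machine_to_phys_mapping[3]);  /* prints 42 */
        return 0;
    }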