extern u8 boot_edid_info[128];
extern struct boot_video_info boot_vid_info;
-/*
- * opt_xenheap_megabytes: Size of Xen heap in megabytes, excluding the
- * page_info table and allocation bitmap.
- */
-static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
-#if defined(CONFIG_X86_64)
-integer_param("xenheap_megabytes", opt_xenheap_megabytes);
-#endif
-
/* opt_nosmp: If true, secondary processors are ignored. */
static int opt_nosmp = 0;
boolean_param("nosmp", opt_nosmp);
unsigned long xen_phys_start;
+#if defined(CONFIG_X86_32)
/* Limits of Xen heap, used to initialise the allocator. */
unsigned long xenheap_phys_start, xenheap_phys_end;
+#endif
extern void arch_init_memory(void);
extern void init_IRQ(void);
multiboot_info_t *mbi = __va(mbi_p);
module_t *mod = (module_t *)__va(mbi->mods_addr);
unsigned long nr_pages, modules_length;
+ unsigned long allocator_bitmap_end;
int i, e820_warn = 0, bytes = 0;
struct ns16550_defaults ns16550 = {
.data_bits = 8,
/* Sanitise the raw E820 map to produce a final clean version. */
max_page = init_e820(memmap_type, e820_raw, &e820_raw_nr);
-#ifdef CONFIG_X86_64
- /*
- * On x86/64 we are able to account for the allocation bitmap
- * (allocated in common/page_alloc.c:init_boot_allocator()) stealing
- * from the Xen heap. Here we make the Xen heap appropriately larger.
- */
- opt_xenheap_megabytes += (max_page / 8) >> 20;
-#endif
-
- /*
- * Since there are some stubs getting built on the stacks which use
- * direct calls/jumps, the heap must be confined to the lower 2G so
- * that those branches can reach their targets.
- */
- if ( opt_xenheap_megabytes > 2048 )
- opt_xenheap_megabytes = 2048;
-
/* Create a temporary copy of the E820 map. */
memcpy(&boot_e820, &e820, sizeof(e820));
s >> PAGE_SHIFT, (e-s) >> PAGE_SHIFT, PAGE_HYPERVISOR);
#if defined(CONFIG_X86_64)
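+/* Space needed for the relocated Xen image: the physical end of the
+ * image, rounded up to the superpage boundary described by 'mask'. */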
+#define reloc_size ((__pa(&_end) + mask) & ~mask)
/* Is the region suitable for relocating Xen? */
- if ( !xen_phys_start && (((e-s) >> 20) >= opt_xenheap_megabytes) )
+ if ( !xen_phys_start && ((e-s) >= reloc_size) )
{
extern l2_pgentry_t l2_xenmap[];
l4_pgentry_t *pl4e;
int i, j, k;
/* Select relocation address. */
- e = (e - (opt_xenheap_megabytes << 20)) & ~mask;
+ e -= reloc_size;
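+            /* reloc_size is a multiple of (mask + 1), so subtracting it
+             * preserves whatever superpage alignment 'e' already has. */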
xen_phys_start = e;
bootsym(trampoline_xen_phys_start) = e;
EARLY_FAIL("Not enough memory to relocate the dom0 kernel image.\n");
reserve_e820_ram(&boot_e820, initial_images_start, initial_images_end);
- /* Initialise Xen heap and boot heap. */
- xenheap_phys_start = init_boot_allocator(__pa(&_end));
- xenheap_phys_end = opt_xenheap_megabytes << 20;
-#if defined(CONFIG_X86_64)
+    /* Initialise the boot heap. */
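+    /* init_boot_allocator() places the allocation bitmap immediately
+     * after the Xen image and returns the first address beyond it. */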
+ allocator_bitmap_end = init_boot_allocator(__pa(&_end));
+#if defined(CONFIG_X86_32)
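+    /* 32-bit Xen keeps a statically-sized xenheap: everything from the
+     * end of the allocation bitmap to the top of the direct-map area. */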
+ xenheap_phys_start = allocator_bitmap_end;
+ xenheap_phys_end = DIRECTMAP_MBYTES << 20;
+#else
if ( !xen_phys_start )
EARLY_FAIL("Not enough memory to relocate Xen.\n");
- xenheap_phys_end += xen_phys_start;
- reserve_e820_ram(&boot_e820, xen_phys_start,
- xen_phys_start + (opt_xenheap_megabytes<<20));
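+    /* With the xenheap drawn from the general heap on 64-bit, the Xen
+     * image and allocation bitmap must be explicitly reserved. */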
+ reserve_e820_ram(&boot_e820, __pa(&_start), allocator_bitmap_end);
#endif
/* Late kexec reservation (dynamic start address). */
numa_initmem_init(0, max_page);
- /* Initialise the Xen heap, skipping RAM holes. */
+#if defined(CONFIG_X86_32)
+ /* Initialise the Xen heap. */
init_xenheap_pages(xenheap_phys_start, xenheap_phys_end);
nr_pages = (xenheap_phys_end - xenheap_phys_start) >> PAGE_SHIFT;
-#ifdef __x86_64__
- init_xenheap_pages(xen_phys_start, __pa(&_start));
- nr_pages += (__pa(&_start) - xen_phys_start) >> PAGE_SHIFT;
- vesa_init();
-#endif
xenheap_phys_start = xen_phys_start;
printk("Xen heap: %luMB (%lukB)\n",
nr_pages >> (20 - PAGE_SHIFT),
nr_pages << (PAGE_SHIFT - 10));
+#endif
end_boot_allocator();
-
early_boot = 0;
+#if defined(CONFIG_X86_64)
+ vesa_init();
+#endif
+
softirq_init();
early_cpu_init();
int xen_in_range(paddr_t start, paddr_t end)
{
- start = max_t(paddr_t, start, xenheap_phys_start);
- end = min_t(paddr_t, end, xenheap_phys_end);
-
- return start < end;
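+    /* Compare against the static xenheap bounds on 32-bit, and against
+     * the (relocated) Xen image on 64-bit, where the xenheap is now
+     * drawn from the general heap. */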
+#if defined(CONFIG_X86_32)
+ paddr_t xs = xenheap_phys_start;
+ paddr_t xe = xenheap_phys_end;
+#else
+ paddr_t xs = __pa(&_start);
+ paddr_t xe = __pa(&_end);
+#endif
+
+ return (start < xe) && (end > xs);
}
/*
* XEN-HEAP SUB-ALLOCATOR
*/
+#ifndef __x86_64__
+
void init_xenheap_pages(paddr_t ps, paddr_t pe)
{
ps = round_pgup(ps);
free_heap_pages(MEMZONE_XEN, virt_to_page(v), order);
}
+#else
+
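+/*
+ * On x86/64 the xenheap is not a separate region: init_xenheap_pages()
+ * feeds memory straight to the domain heap, and xenheap allocations are
+ * ordinary heap pages tagged with PGC_xen_heap so that they can be
+ * recognised and untagged on free.
+ */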
+void init_xenheap_pages(paddr_t ps, paddr_t pe)
+{
+ init_domheap_pages(ps, pe);
+}
+
+void *alloc_xenheap_pages(unsigned int order)
+{
+ struct page_info *pg;
+ unsigned int i;
+
+ ASSERT(!in_irq());
+
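+    /* Pages may come from any zone above the reserved MEMZONE_XEN. */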
+ pg = alloc_heap_pages(
+ MEMZONE_XEN+1, 31, cpu_to_node(smp_processor_id()), order);
+ if ( unlikely(pg == NULL) )
+ goto no_memory;
+
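+    /* Tag each page as a xenheap page. */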
+ for ( i = 0; i < (1u << order); i++ )
+ pg[i].count_info |= PGC_xen_heap;
+
+ return page_to_virt(pg);
+
+ no_memory:
+ printk("Cannot handle page request order %d!\n", order);
+ return NULL;
+}
+
+void free_xenheap_pages(void *v, unsigned int order)
+{
+ struct page_info *pg;
+ unsigned int i;
+
+ ASSERT(!in_irq());
+
+ if ( v == NULL )
+ return;
+
+ pg = virt_to_page(v);
+
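+    /* Untag the pages before handing them back to the heap. */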
+ for ( i = 0; i < (1u << order); i++ )
+ pg[i].count_info &= ~PGC_xen_heap;
+
+ free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
+}
+
+#endif
+
/*************************