/* Insert Xen mappings. */
for ( i = l4_table_offset(HYPERVISOR_VIRT_START);
- i < l4_table_offset(HYPERVISOR_VIRT_END); ++i )
+ i < l4_table_offset(DIRECTMAP_VIRT_END); ++i )
efi_l4_pgtable[i] = idle_pg_table[i];
#endif
}
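For reference, l4_table_offset() just extracts the PML4 index bits of a virtual
address (as defined in the x86-64 page handling headers):

    #define l4_table_offset(a) \
        (((a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))

With the default layout, l4_table_offset(HYPERVISOR_VIRT_START) is 256, and
l4_table_offset(DIRECTMAP_VIRT_END) becomes 511 with this patch, so the loop now
mirrors L4 slots 256..510 (the whole extended direct map) into the EFI page
tables, instead of just the old slots 256..271 below HYPERVISOR_VIRT_END.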
/* Xen private mappings. */
memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
&idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
- ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
+ ROOT_PAGETABLE_PV_XEN_SLOTS * sizeof(l4_pgentry_t));
l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR);
l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
#include <xen/dmi.h>
#include <xen/pfn.h>
#include <xen/nodemask.h>
+#include <xen/tmem_xen.h> /* for opt_tmem only */
#include <public/version.h>
#include <compat/platform.h>
#include <compat/xen.h>
if ( max_pdx > FRAMETABLE_NR )
max_pdx = FRAMETABLE_NR;
+ if ( max_pdx >= PAGE_LIST_NULL )
+ max_pdx = PAGE_LIST_NULL - 1;
+
max_page = pdx_to_pfn(max_pdx - 1) + 1;
}
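The new clamp ties max_pdx to the page list sentinel (see the mm.h hunk at the
end of this patch): list links store a 32-bit page index, with ~0 reserved as
the null value. A rough capacity check, assuming pdx == pfn (i.e. no pdx
compression) and 4KiB pages:

    max_pdx <= PAGE_LIST_NULL - 1 ~= 2^32 page indices
    2^32 pages * 4KiB = 16TiB

which is where the 16Tb limit of this change comes from.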
/* Create new mappings /before/ passing memory to the allocator. */
if ( map_e < e )
{
- map_pages_to_xen((unsigned long)__va(map_e), map_e >> PAGE_SHIFT,
- (e - map_e) >> PAGE_SHIFT, PAGE_HYPERVISOR);
- init_boot_pages(map_e, e);
+ uint64_t limit = __pa(HYPERVISOR_VIRT_END - 1) + 1;
+ uint64_t end = min(e, limit);
+
+ if ( map_e < end )
+ {
+ map_pages_to_xen((unsigned long)__va(map_e), PFN_DOWN(map_e),
+ PFN_DOWN(end - map_e), PAGE_HYPERVISOR);
+ init_boot_pages(map_e, end);
+ map_e = end;
+ }
+ }
+ if ( map_e < e )
+ {
+ /* This range must not be passed to the boot allocator and
+ * must also not be mapped with _PAGE_GLOBAL. */
+ map_pages_to_xen((unsigned long)__va(map_e), PFN_DOWN(map_e),
+ PFN_DOWN(e - map_e), __PAGE_HYPERVISOR);
}
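Memory above HYPERVISOR_VIRT_END is not mapped in PV guest address spaces, so it
must not be marked _PAGE_GLOBAL: global TLB entries survive CR3 switches and
could otherwise outlive a switch into a context lacking the mapping. It is also
withheld from the boot allocator here and handed to the domheap later (see the
end_boot_allocator() hunk below). The two mapping constants differ in exactly
that bit; a sketch consistent with the x86-64 page.h of this era (the flag
values are the architectural PTE bits):

    #define _PAGE_PRESENT  0x001U
    #define _PAGE_RW       0x002U
    #define _PAGE_ACCESSED 0x020U
    #define _PAGE_DIRTY    0x040U
    #define _PAGE_GLOBAL   0x100U

    #define __PAGE_HYPERVISOR \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
    #define PAGE_HYPERVISOR (__PAGE_HYPERVISOR | _PAGE_GLOBAL)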
if ( s < map_s )
{
end_boot_allocator();
system_state = SYS_STATE_boot;
+ if ( max_page - 1 > virt_to_mfn(HYPERVISOR_VIRT_END - 1) )
+ {
+ unsigned long limit = virt_to_mfn(HYPERVISOR_VIRT_END - 1);
+ uint64_t mask = PAGE_SIZE - 1;
+
+ xenheap_max_mfn(limit);
+
+        /* Hand memory above the xenheap limit to the domheap allocator. */
+ for ( i = 0; i < boot_e820.nr_map; i++ )
+ {
+ uint64_t s, e;
+
+ s = (boot_e820.map[i].addr + mask) & ~mask;
+ e = (boot_e820.map[i].addr + boot_e820.map[i].size) & ~mask;
+ if ( PFN_DOWN(e) <= limit )
+ continue;
+ if ( PFN_DOWN(s) <= limit )
+ s = pfn_to_paddr(limit + 1);
+ init_domheap_pages(s, e);
+ }
+
+ if ( opt_tmem )
+ {
+ printk(XENLOG_WARNING
+ "TMEM physical RAM limit exceeded, disabling TMEM\n");
+ opt_tmem = 0;
+ }
+ }
+
vm_init();
vesa_init();
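To put numbers on the new branch (using the default layout documented in the
config.h hunk below: direct map base at PML4 slot 262, HYPERVISOR_VIRT_END at
the end of slot 271):

    HYPERVISOR_VIRT_END - DIRECTMAP_VIRT_START = 10 slots * 512GiB = 5TiB
    limit = virt_to_mfn(HYPERVISOR_VIRT_END - 1)
          = 5TiB / 4KiB - 1 = 5 * 2^28 - 1

RAM at MFNs above `limit` can never back the xenheap, so it goes straight to the
domheap, and tmem, which is not prepared for memory it cannot use this way (per
the warning text), is switched off.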
return -EINVAL;
}
- ret = map_pages_to_xen((unsigned long)mfn_to_virt(spfn), spfn,
- epfn - spfn, PAGE_HYPERVISOR);
- if ( ret )
- return ret;
+ i = virt_to_mfn(HYPERVISOR_VIRT_END - 1) + 1;
+ if ( spfn < i )
+ {
+ ret = map_pages_to_xen((unsigned long)mfn_to_virt(spfn), spfn,
+ min(epfn, i) - spfn, PAGE_HYPERVISOR);
+ if ( ret )
+ return ret;
+ }
+ if ( i < epfn )
+ {
+ if ( i < spfn )
+ i = spfn;
+ ret = map_pages_to_xen((unsigned long)mfn_to_virt(i), i,
+ epfn - i, __PAGE_HYPERVISOR);
+ if ( ret )
+ return ret;
+ }
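The memory hot-add path gets the same global/non-global split. A worked
illustration with hypothetical PFNs, assuming the 5TiB boundary computed above
(so i = 5 * 2^28 on entry):

    spfn = i - 0x1000, epfn = i + 0x1000
    first call:  maps [spfn, i)  with PAGE_HYPERVISOR   (global)
    second call: maps [i, epfn)  with __PAGE_HYPERVISOR (non-global)

A range lying entirely below or entirely above the boundary takes only one of
the two calls, with the `i < spfn` adjustment keeping the second mapping's
start inside the hot-added range.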
old_node_start = NODE_DATA(node)->node_start_pfn;
old_node_span = NODE_DATA(node)->node_spanned_pages;
unsigned long needed = (sizeof(**_heap) +
sizeof(**avail) * NR_ZONES +
PAGE_SIZE - 1) >> PAGE_SHIFT;
+#ifdef DIRECTMAP_VIRT_END
+ unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);
+#endif
int i, j;
if ( !first_node_initialised )
}
#ifdef DIRECTMAP_VIRT_END
else if ( *use_tail && nr >= needed &&
- (mfn + nr) <= (virt_to_mfn(DIRECTMAP_VIRT_END - 1) + 1) )
+ (mfn + nr) <= (virt_to_mfn(eva - 1) + 1) )
{
_heap[node] = mfn_to_virt(mfn + nr - needed);
avail[node] = mfn_to_virt(mfn + nr - 1) +
PAGE_SIZE - sizeof(**avail) * NR_ZONES;
}
else if ( nr >= needed &&
- (mfn + needed) <= (virt_to_mfn(DIRECTMAP_VIRT_END - 1) + 1) )
+ (mfn + needed) <= (virt_to_mfn(eva - 1) + 1) )
{
_heap[node] = mfn_to_virt(mfn);
avail[node] = mfn_to_virt(mfn + needed - 1) +
#else
+static unsigned int __read_mostly xenheap_bits;
+
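+/*
+ * Address width, in the MEMF_bits() sense, below which xenheap allocations
+ * must stay so they remain inside the always-mapped part of the direct map;
+ * 0 means no restriction. E.g. with the 5TiB boundary set up in setup.c:
+ *   mfn = 5 * 2^28 - 1, fls(mfn) = 31,
+ *   xenheap_bits = 31 + 12 - 1 = 42, i.e. below 2^42 = 4TiB <= 5TiB
+ * (MEMF_bits() can only express power-of-two limits, so we round down).
+ */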
+void __init xenheap_max_mfn(unsigned long mfn)
+{
+ xenheap_bits = fls(mfn) + PAGE_SHIFT - 1;
+}
+
void init_xenheap_pages(paddr_t ps, paddr_t pe)
{
init_domheap_pages(ps, pe);
ASSERT(!in_irq());
+ if ( xenheap_bits && (memflags >> _MEMF_bits) > xenheap_bits )
+        memflags &= ~MEMF_bits(~0U);
+ if ( !(memflags >> _MEMF_bits) )
+ memflags |= MEMF_bits(xenheap_bits);
+
pg = alloc_domheap_pages(NULL, order, memflags);
if ( unlikely(pg == NULL) )
return NULL;
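The clamp above builds on the MEMF_bits() encoding from xen/include/xen/mm.h,
which stores the requested address width in the top byte of memflags:

    #define _MEMF_bits   24
    #define MEMF_bits(n) ((n) << _MEMF_bits)

So with xenheap_bits == 42, say: a request of MEMF_bits(64) is clamped down to
42, a request carrying no width at all defaults to 42, and a stricter request
such as MEMF_bits(32) is left alone.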
* Page-frame information array.
* 0xffff830000000000 - 0xffff87ffffffffff [5TB, 5*2^40 bytes, PML4:262-271]
* 1:1 direct mapping of all physical memory.
- * 0xffff880000000000 - 0xffffffffffffffff [120TB, PML4:272-511]
- * Guest-defined use.
+ * 0xffff880000000000 - 0xffffffffffffffff [120TB, PML4:272-511]
+ * PV: Guest-defined use.
+ * 0xffff880000000000 - 0xffffff7fffffffff [119.5TB, PML4:272-510]
+ * HVM/idle: continuation of 1:1 mapping
+ * 0xffffff8000000000 - 0xffffffffffffffff [512GB, 2^39 bytes PML4:511]
+ * HVM/idle: unused
*
* Compatibility guest area layout:
* 0x0000000000000000 - 0x00000000f57fffff [3928MB, PML4:0]
#define ROOT_PAGETABLE_FIRST_XEN_SLOT 256
#define ROOT_PAGETABLE_LAST_XEN_SLOT 271
#define ROOT_PAGETABLE_XEN_SLOTS \
+ (L4_PAGETABLE_ENTRIES - ROOT_PAGETABLE_FIRST_XEN_SLOT - 1)
+#define ROOT_PAGETABLE_PV_XEN_SLOTS \
(ROOT_PAGETABLE_LAST_XEN_SLOT - ROOT_PAGETABLE_FIRST_XEN_SLOT + 1)
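Spelled out, with L4_PAGETABLE_ENTRIES == 512:

    ROOT_PAGETABLE_XEN_SLOTS    = 512 - 256 - 1 = 255  (slots 256..510)
    ROOT_PAGETABLE_PV_XEN_SLOTS = 271 - 256 + 1 = 16   (slots 256..271)

The memcpy in the PV L4 construction above accordingly switches to the PV-sized
constant, so PV guests continue to see only the original 16 Xen slots.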
/* Hypervisor reserves PML4 slots 256 to 271 inclusive. */
#define FRAMETABLE_SIZE GB(128)
#define FRAMETABLE_NR (FRAMETABLE_SIZE / sizeof(*frame_table))
#define FRAMETABLE_VIRT_START (FRAMETABLE_VIRT_END - FRAMETABLE_SIZE)
-/* Slot 262-271: A direct 1:1 mapping of all of physical memory. */
+/* Slot 262-271/510: A direct 1:1 mapping of all of physical memory. */
#define DIRECTMAP_VIRT_START (PML4_ADDR(262))
-#define DIRECTMAP_SIZE (PML4_ENTRY_BYTES*10)
+#define DIRECTMAP_SIZE (PML4_ENTRY_BYTES * (511 - 262))
#define DIRECTMAP_VIRT_END (DIRECTMAP_VIRT_START + DIRECTMAP_SIZE)
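With PML4_ENTRY_BYTES being 2^39 (512GiB per slot), the new size works out to:

    DIRECTMAP_SIZE = (511 - 262) * 512GiB = 249 * 512GiB = 124.5TiB

so DIRECTMAP_VIRT_END lands at PML4_ADDR(511), leaving the final slot unused,
as the layout comment above documents.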
#ifndef __ASSEMBLY__
/* Xen suballocator. These functions are interrupt-safe. */
void init_xenheap_pages(paddr_t ps, paddr_t pe);
+void xenheap_max_mfn(unsigned long mfn);
void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
void free_xenheap_pages(void *v, unsigned int order);
#define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
/* These must only have instances in struct page_info. */
# define page_list_entry
-#define PAGE_LIST_NULL (~0)
+# define PAGE_LIST_NULL ((typeof(((struct page_info){}).list.next))~0)
# if !defined(pdx_to_page) && !defined(page_to_pdx)
# if defined(__page_to_mfn) || defined(__mfn_to_page)
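The typeof cast is what makes the max_pdx clamp near the top of this patch work
at all: the plain (~0) is an int -1 that gets sign-extended to 0xffff...ffff
when compared against an unsigned long, so that comparison could effectively
never be true, whereas the cast truncates the sentinel to the 32-bit width of
the list links first. A standalone illustration (plain C; the max_pdx value is
hypothetical, and uint32_t stands in for the type of page_info's list.next):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long max_pdx = 0x180000000UL;  /* > 2^32 pages, hypothetical */

        /* Untyped sentinel: ~0 is int -1, sign-extended to 0xffff...ffff. */
        printf("%d\n", max_pdx >= (unsigned long)~0);  /* prints 0: clamp dead */

        /* Typed sentinel: truncated to the 32-bit link width first. */
        printf("%d\n", max_pdx >= (uint32_t)~0);       /* prints 1: clamp live */
        return 0;
    }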