#define PT_PT 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
#define PT_MEM 0xf7d /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=0 P=1 */
+#define PT_MEM_L3 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
#define PT_DEV 0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
/* This must be the very first address in the loaded image.
* It should be linked at XEN_VIRT_START, and loaded at any
- * 2MB-aligned address. All of text+data+bss must fit in 2MB,
+ * 4K-aligned address. All of text+data+bss must fit in 2MB,
* or the initial pagetable code below will need adjustment. */
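+/* (boot_third below holds LPAE_ENTRIES == 512 4K entries, i.e. exactly 2MB,
+ * which is where the 2MB limit on text+data+bss comes from.) */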
.global start
start:
/* Setup boot_pgtable: */
ldr r1, =boot_second
add r1, r1, r10 /* r1 := paddr (boot_second) */
- mov r3, #0x0
/* ... map boot_second in boot_pgtable[0] */
orr r2, r1, #PT_UPPER(PT) /* r2:r3 := table map of boot_second */
orr r2, r2, #PT_LOWER(PT) /* (+ rights for linear PT) */
+ mov r3, #0x0
strd r2, r3, [r4, #0] /* Map it in slot 0 */
/* ... map of paddr(start) in boot_pgtable */
ldr r4, =boot_second
add r4, r4, r10 /* r4 := paddr (boot_second) */
- lsr r2, r9, #SECOND_SHIFT /* Base address for 2MB mapping */
- lsl r2, r2, #SECOND_SHIFT
+ ldr r1, =boot_third
+ add r1, r1, r10 /* r1 := paddr (boot_third) */
+
+ /* ... map boot_third in boot_second[1] */
+ orr r2, r1, #PT_UPPER(PT) /* r2:r3 := table map of boot_third */
+ orr r2, r2, #PT_LOWER(PT) /* (+ rights for linear PT) */
+ mov r3, #0x0
+ strd r2, r3, [r4, #8] /* Map it in slot 1 */
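+ /* Note: boot_second[1] covers VA 2MB-4MB, i.e. XEN_VIRT_START at its
+  * usual 2MB value, so the 4K mappings built in boot_third below appear
+  * at Xen's linked address. */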
+
+ /* ... map of paddr(start) in boot_second */
+ lsr r2, r9, #SECOND_SHIFT /* Offset of base paddr in boot_second */
+ ldr r3, =LPAE_ENTRY_MASK
+ and r1, r2, r3
+ cmp r1, #1
+ beq virtphys_clash /* It's in slot 1, which we cannot handle */
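+ /* e.g. a (purely illustrative) load address of 0x80200000 gives
+  * (0x80200000 >> SECOND_SHIFT) & LPAE_ENTRY_MASK == 1, i.e. the slot
+  * already holding the boot_third table entry, hence the bail-out. */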
+
+ lsl r2, r2, #SECOND_SHIFT /* Base address for 2MB mapping */
orr r2, r2, #PT_UPPER(MEM) /* r2:r3 := section map */
orr r2, r2, #PT_LOWER(MEM)
+ mov r3, #0x0
+ lsl r1, r1, #3 /* r1 := Slot offset */
+ strd r2, r3, [r4, r1] /* Mapping of paddr(start) */
- /* ... map of vaddr(start) in boot_second */
- ldr r1, =start
- lsr r1, #(SECOND_SHIFT - 3) /* Slot for vaddr(start) */
- strd r2, r3, [r4, r1] /* Map vaddr(start) */
+ /* Setup boot_third: */
+1: ldr r4, =boot_third
+ add r4, r4, r10 /* r4 := paddr (boot_third) */
- /* ... map of paddr(start) in boot_second */
- lsrs r1, r9, #30 /* Base paddr */
- bne 1f /* If paddr(start) is not in slot 0
- * then the mapping was done in
- * boot_pgtable above */
+ lsr r2, r9, #THIRD_SHIFT /* Base address for 4K mapping */
+ lsl r2, r2, #THIRD_SHIFT
+ orr r2, r2, #PT_UPPER(MEM_L3) /* r2:r3 := 4K page map */
+ orr r2, r2, #PT_LOWER(MEM_L3)
+ mov r3, #0x0
- mov r1, r9, lsr #(SECOND_SHIFT - 3) /* Slot for paddr(start) */
- strd r2, r3, [r4, r1] /* Map Xen there */
-1:
+ /* ... map of vaddr(start) in boot_third */
+ mov r1, #0
+1: strd r2, r3, [r4, r1] /* Map vaddr(start) */
+ add r2, r2, #PAGE_SIZE /* Next page */
+ add r1, r1, #8 /* Next slot */
+ cmp r1, #(LPAE_ENTRIES<<3) /* 512 8-byte entries per page */
+ blo 1b
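+ /* 512 x 4K entries: the whole 2MB window at XEN_VIRT_START is now
+  * mapped onto the 2MB of RAM starting at the 4K-aligned load address,
+  * hence the requirement that text+data+bss fit in 2MB. */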
/* Defer fixmap and dtb mapping until after paging enabled, to
* avoid them clashing with the 1:1 mapping. */
/* boot pagetable setup complete */
+ b 1f
+
+virtphys_clash:
+ /* Identity map clashes with boot_third, which we cannot handle yet */
+ PRINT("- Unable to build boot page tables - virt and phys addresses clash. -\r\n")
+ b fail
+
+1:
PRINT("- Turning on paging -\r\n")
ldr r1, =paging /* Explicit vaddr, not PC-relative */
#define PT_PT 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
#define PT_MEM 0xf7d /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=0 P=1 */
+#define PT_MEM_L3 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
#define PT_DEV 0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
*
* This must be the very first address in the loaded image.
* It should be linked at XEN_VIRT_START, and loaded at any
- * 2MB-aligned address. All of text+data+bss must fit in 2MB,
+ * 4K-aligned address. All of text+data+bss must fit in 2MB,
* or the initial pagetable code below will need adjustment.
*/
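+/* (As on arm32, boot_third's 512 4K entries cover exactly 2MB, hence the
+ * text+data+bss limit above.) */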
lsl x2, x1, #ZEROETH_SHIFT /* Base address for 512GB mapping */
mov x3, #PT_MEM /* x2 := Section mapping */
orr x2, x2, x3
- lsl x1, x1, #3 /* x1 := Slot offset */
- str x2, [x4, x1] /* Mapping of paddr(start)*/
+ and x1, x1, #LPAE_ENTRY_MASK /* x1 := Slot offset */
+ lsl x1, x1, #3
+ str x2, [x4, x1] /* Mapping of paddr(start) */
1: /* Setup boot_first: */
ldr x4, =boot_first /* Next level into boot_first */
/* ... map of paddr(start) in boot_first */
lsr x2, x19, #FIRST_SHIFT /* x2 := Offset of base paddr in boot_first */
- and x1, x2, 0x1ff /* x1 := Slot to use */
+ and x1, x2, #LPAE_ENTRY_MASK /* x1 := Slot to use */
cbz x1, 1f /* It's in slot 0, map in boot_second */
lsl x2, x2, #FIRST_SHIFT /* Base address for 1GB mapping */
ldr x4, =boot_second /* Next level into boot_second */
add x4, x4, x20 /* x4 := paddr(boot_second) */
- lsr x2, x19, #SECOND_SHIFT /* Base address for 2MB mapping */
- lsl x2, x2, #SECOND_SHIFT
+ /* ... map boot_third in boot_second[1] */
+ ldr x1, =boot_third
+ add x1, x1, x20 /* x1 := paddr(boot_third) */
+ mov x3, #PT_PT /* x2 := table map of boot_third */
+ orr x2, x1, x3 /* + rights for linear PT */
+ str x2, [x4, #8] /* Map it in slot 1 */
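+ /* Note: as on arm32, boot_second[1] is the 2MB slot covering
+  * XEN_VIRT_START (usual value 2MB), where boot_third's 4K mappings of
+  * Xen will appear. */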
+
+ /* ... map of paddr(start) in boot_second */
+ lsr x2, x19, #SECOND_SHIFT /* x2 := Offset of base paddr in boot_second */
+ and x1, x2, #LPAE_ENTRY_MASK /* x1 := Slot to use */
+ cmp x1, #1
+ b.eq virtphys_clash /* It's in slot 1, which we cannot handle */
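+ /* Same illustrative example as the arm32 path: a load address of
+  * 0x80200000 would give slot 1 here and take the virtphys_clash path. */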
+
+ lsl x2, x2, #SECOND_SHIFT /* Base address for 2MB mapping */
mov x3, #PT_MEM /* x2 := Section map */
orr x2, x2, x3
+ lsl x1, x1, #3 /* x1 := Slot offset */
+ str x2, [x4, x1] /* Create mapping of paddr(start) */
- /* ... map of vaddr(start) in boot_second */
- ldr x1, =start
- lsr x1, x1, #(SECOND_SHIFT - 3) /* Slot for vaddr(start) */
- str x2, [x4, x1] /* Map vaddr(start) */
+1: /* Setup boot_third: */
+ ldr x4, =boot_third
+ add x4, x4, x20 /* x4 := paddr (boot_third) */
- /* ... map of paddr(start) in boot_second */
- lsr x1, x19, #FIRST_SHIFT /* Base paddr */
- cbnz x1, 1f /* If paddr(start) is not in slot 0
- * then the mapping was done in
- * boot_pgtable or boot_first above */
+ lsr x2, x19, #THIRD_SHIFT /* Base address for 4K mapping */
+ lsl x2, x2, #THIRD_SHIFT
+ mov x3, #PT_MEM_L3 /* x2 := 4K page map */
+ orr x2, x2, x3
- lsr x1, x19, #(SECOND_SHIFT - 3) /* Slot for paddr(start) */
- str x2, [x4, x1] /* Map Xen there */
-1:
+ /* ... map of vaddr(start) in boot_third */
+ mov x1, xzr
+1: str x2, [x4, x1] /* Map vaddr(start) */
+ add x2, x2, #PAGE_SIZE /* Next page */
+ add x1, x1, #8 /* Next slot */
+ cmp x1, #(LPAE_ENTRIES<<3) /* 512 8-byte entries per page */
+ b.lt 1b
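+ /* As on arm32: 512 x 4K entries map the whole 2MB window at
+  * XEN_VIRT_START onto the 2MB of RAM from the 4K-aligned load address. */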
/* Defer fixmap and dtb mapping until after paging enabled, to
* avoid them clashing with the 1:1 mapping. */
/* boot pagetable setup complete */
+ b 1f
+
+virtphys_clash:
+ /* Identity map clashes with boot_third, which we cannot handle yet */
+ PRINT("- Unable to build boot page tables - virt and phys addresses clash. -\r\n")
+ b fail
+
+1:
PRINT("- Turning on paging -\r\n")
ldr x1, =paging /* Explicit vaddr, not PC-relative */
* to the CPUs own pagetables.
*
* These pagetables have a very simple structure. They include:
- * - a 2MB mapping of xen at XEN_VIRT_START, boot_first and
- * boot_second are used to populate the trie down to that mapping.
+ * - 2MB worth of 4K mappings of xen at XEN_VIRT_START; boot_first and
+ *   boot_second are used to populate the tables down to boot_third,
+ *   which contains the actual mappings.
* - a 1:1 mapping of xen at its current physical address. This uses a
* section mapping at whichever of boot_{pgtable,first,second}
* covers that physical address.
lpae_t boot_first[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
#endif
lpae_t boot_second[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
+lpae_t boot_third[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
/* Main runtime page tables */
#endif
memset(boot_second, 0x0, PAGE_SIZE);
clean_and_invalidate_xen_dcache(boot_second);
+ memset(boot_third, 0x0, PAGE_SIZE);
+ clean_and_invalidate_xen_dcache(boot_third);
/* Break up the Xen mapping into 4k pages and protect them separately. */
for ( i = 0; i < LPAE_ENTRIES; i++ )
*/
#define LPAE_SHIFT 9
-#define LPAE_ENTRIES (1u << LPAE_SHIFT)
+#define LPAE_ENTRIES (_AC(1,U) << LPAE_SHIFT)
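+/* _AC() drops the type suffix when included from assembly, so head.S can
+ * use LPAE_ENTRIES directly. */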
#define LPAE_ENTRY_MASK (LPAE_ENTRIES - 1)
#define THIRD_SHIFT (PAGE_SHIFT)