Both the trampoline copy and the BSS initialisation can be performed
more efficiently by using the 4-byte variants of the string operations.

On Intel systems with ERMSB (Enhanced REP MOVSB/STOSB) this makes no
practical difference, but on all other systems it is roughly 4 times
faster, as each iteration moves 4 bytes rather than 1.

Switching trampoline_end from GLOBAL() to ENTRY() keeps the symbol
suitably aligned, and the new linker assertions confirm that all four
boundary symbols are 4-byte aligned, so both sizes divide exactly by 4.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
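
[ For illustration only: a minimal userspace sketch, not part of the
  patch, modelling the stosb -> stosl transformation applied below.
  The function names are hypothetical, and the inline asm assumes an
  x86-64 GCC/Clang toolchain. ]

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Clear len bytes one at a time, as the old "rep stosb" did. */
static void clear_bytes(void *dst, size_t len)
{
    asm volatile ( "rep stosb"
                   : "+D" (dst), "+c" (len)
                   : "a" (0)
                   : "memory" );
}

/* Clear len bytes 4 at a time; len must be a multiple of 4. */
static void clear_dwords(void *dst, size_t len)
{
    len >>= 2;                                /* shr $2, %ecx */
    asm volatile ( "rep stosl"
                   : "+D" (dst), "+c" (len)
                   : "a" (0)
                   : "memory" );
}

int main(void)
{
    uint8_t a[64], b[64];

    memset(a, 0xff, sizeof(a));
    memset(b, 0xff, sizeof(b));

    clear_bytes(a, sizeof(a));
    clear_dwords(b, sizeof(b));               /* 64 is a multiple of 4 */

    assert(!memcmp(a, b, sizeof(a)));         /* identical results */
    return 0;
}
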
mov $sym_phys(__bss_end),%ecx
sub %edi,%ecx
xor %eax,%eax
- rep stosb
+ shr $2,%ecx
+ rep stosl
/* Interrogate CPU extended features via CPUID. */
mov $0x80000000,%eax
/* Copy bootstrap trampoline to low memory, below 1MB. */
mov $sym_phys(trampoline_start),%esi
- mov $trampoline_end - trampoline_start,%ecx
- rep movsb
+ mov $((trampoline_end - trampoline_start) / 4),%ecx
+ rep movsl
/* Jump into the relocated trampoline. */
lret
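
[ Similarly, a hedged userspace model of the movsb -> movsl change in
  the copy above; hypothetical code, not part of the patch.  In the
  real hunk the dword count is an assemble-time constant,
  $((trampoline_end - trampoline_start) / 4). ]

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Copy len bytes 4 at a time; len must be a multiple of 4. */
static void copy_dwords(void *dst, const void *src, size_t len)
{
    size_t n = len / 4;                       /* count in 32-bit words */
    asm volatile ( "rep movsl"
                   : "+D" (dst), "+S" (src), "+c" (n)
                   :
                   : "memory" );
}

int main(void)
{
    char src[32] = "trampoline contents, 32 bytes..";
    char dst[32];

    copy_dwords(dst, src, sizeof(src));
    assert(!memcmp(dst, src, sizeof(dst)));
    return 0;
}
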
ENTRY(trampoline_start)
#include "trampoline.S"
-GLOBAL(trampoline_end)
+ENTRY(trampoline_end)
#include "x86_64.S"
ASSERT(IS_ALIGNED(__init_begin, PAGE_SIZE), "__init_begin misaligned")
ASSERT(IS_ALIGNED(__init_end, PAGE_SIZE), "__init_end misaligned")
+
+ASSERT(IS_ALIGNED(trampoline_start, 4), "trampoline_start misaligned")
+ASSERT(IS_ALIGNED(trampoline_end, 4), "trampoline_end misaligned")
+ASSERT(IS_ALIGNED(__bss_start, 4), "__bss_start misaligned")
+ASSERT(IS_ALIGNED(__bss_end, 4), "__bss_end misaligned")
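
[ Why the assertions matter: if either region's size were not a
  multiple of 4, the dword count would truncate and the trailing bytes
  would silently never be copied or cleared.  A trivial illustration: ]

#include <assert.h>
#include <stddef.h>

int main(void)
{
    size_t bytes  = 1026;           /* not a multiple of 4 */
    size_t dwords = bytes / 4;      /* truncates to 256 */

    assert(dwords * 4 != bytes);    /* 2 trailing bytes would be missed */
    return 0;
}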