x86: force suitable alignment in sources rather than in linker script
authorJan Beulich <jbeulich@suse.com>
Mon, 15 Aug 2016 08:41:48 +0000 (10:41 +0200)
committerJan Beulich <jbeulich@suse.com>
Mon, 15 Aug 2016 08:41:48 +0000 (10:41 +0200)
Besides being more logical, this also allows verifying correct recording
of alignments in .o files.

The cpu0_stack related ASSERT() in xen.lds.S is now of questionable
value (as it now verifies correct tool chain behavior), but I've left
it in nevertheless.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
xen/arch/x86/hvm/hvm.c
xen/arch/x86/mm.c
xen/arch/x86/setup.c
xen/arch/x86/xen.lds.S
xen/include/xen/compiler.h

index 893eff6dd1546d715b2db40ec7d96eb485e3153c..0180f26b56b75c08c8c53b58a599de6085949bce 100644 (file)
@@ -88,7 +88,7 @@ struct hvm_function_table hvm_funcs __read_mostly;
  * the hardware domain which needs a more permissive one.
  */
 #define HVM_IOBITMAP_SIZE (3 * PAGE_SIZE)
-unsigned long __section(".bss.page_aligned")
+unsigned long __section(".bss.page_aligned") __aligned(PAGE_SIZE)
     hvm_io_bitmap[HVM_IOBITMAP_SIZE / BYTES_PER_LONG];
 
 /* Xen command-line option to enable HAP */
index 109b8bee8dc6c901e4fa66525370e02dd7ff9f6d..ff8e90407311899c15dc78dccfe9fd4be8496a6b 100644 (file)
 #include <asm/pci.h>
 
 /* Mapping of the fixmap space needed early. */
-l1_pgentry_t __section(".bss.page_aligned") l1_fixmap[L1_PAGETABLE_ENTRIES];
+l1_pgentry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
+    l1_fixmap[L1_PAGETABLE_ENTRIES];
 
 #define MEM_LOG(_f, _a...) gdprintk(XENLOG_WARNING , _f "\n" , ## _a)
 
@@ -588,7 +589,8 @@ static inline void guest_get_eff_kern_l1e(struct vcpu *v, unsigned long addr,
     TOGGLE_MODE();
 }
 
-const char __section(".bss.page_aligned.const") zero_page[PAGE_SIZE];
+const char __section(".bss.page_aligned.const") __aligned(PAGE_SIZE)
+    zero_page[PAGE_SIZE];
 
 static void invalidate_shadow_ldt(struct vcpu *v, int flush)
 {
index 217c775e6d61e46736ece5583c6a29028861bae8..8ae897afaf467ff086d95e458fdbc9ab3f4884a5 100644 (file)
@@ -105,7 +105,8 @@ unsigned long __read_mostly xen_virt_end;
 
 DEFINE_PER_CPU(struct tss_struct, init_tss);
 
-char __section(".bss.stack_aligned") cpu0_stack[STACK_SIZE];
+char __section(".bss.stack_aligned") __aligned(STACK_SIZE)
+    cpu0_stack[STACK_SIZE];
 
 struct cpuinfo_x86 __read_mostly boot_cpu_data = { 0, 0, 0, 0, -1 };
 
index 0970299de5761eb656d694b74e7a317fbb853adb..2d1d43df2d37a310f53b1b4f19726493a211f3e2 100644 (file)
@@ -222,7 +222,6 @@ SECTIONS
   } :text
 
   .data : {                    /* Data */
-       . = ALIGN(PAGE_SIZE);
        *(.data.page_aligned)
        *(.data)
        *(.data.rel)
@@ -231,10 +230,8 @@ SECTIONS
   } :text
 
   .bss : {                     /* BSS */
-       . = ALIGN(STACK_SIZE);
        __bss_start = .;
        *(.bss.stack_aligned)
-       . = ALIGN(PAGE_SIZE);
        *(.bss.page_aligned*)
        *(.bss)
        . = ALIGN(SMP_CACHE_BYTES);
index f3e8d9526de697262f9b0ff762a3c72bf409cd88..33f0b96f289e69874ac1fd4f823c8cf272c8c25a 100644 (file)
@@ -34,6 +34,8 @@
 #define __used_section(s) __used __attribute__((__section__(s)))
 #define __text_section(s) __attribute__((__section__(s)))
 
+#define __aligned(a) __attribute__((__aligned__(a)))
+
 #ifdef INIT_SECTIONS_ONLY
 /*
  * For sources indicated to have only init code, make sure even