* the hardware domain which needs a more permissive one.
*/
#define HVM_IOBITMAP_SIZE (3 * PAGE_SIZE)
-unsigned long __section(".bss.page_aligned")
+unsigned long __section(".bss.page_aligned") __aligned(PAGE_SIZE)
    hvm_io_bitmap[HVM_IOBITMAP_SIZE / BYTES_PER_LONG];
/* Xen command-line option to enable HAP */
#include <asm/pci.h>
/* Mapping of the fixmap space needed early. */
-l1_pgentry_t __section(".bss.page_aligned") l1_fixmap[L1_PAGETABLE_ENTRIES];
+l1_pgentry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
+    l1_fixmap[L1_PAGETABLE_ENTRIES];
#define MEM_LOG(_f, _a...) gdprintk(XENLOG_WARNING , _f "\n" , ## _a)
TOGGLE_MODE();
}
-const char __section(".bss.page_aligned.const") zero_page[PAGE_SIZE];
+const char __section(".bss.page_aligned.const") __aligned(PAGE_SIZE)
+    zero_page[PAGE_SIZE];
static void invalidate_shadow_ldt(struct vcpu *v, int flush)
{
DEFINE_PER_CPU(struct tss_struct, init_tss);
-char __section(".bss.stack_aligned") cpu0_stack[STACK_SIZE];
+char __section(".bss.stack_aligned") __aligned(STACK_SIZE)
+    cpu0_stack[STACK_SIZE];
struct cpuinfo_x86 __read_mostly boot_cpu_data = { 0, 0, 0, 0, -1 };
} :text
.data : { /* Data */
-       . = ALIGN(PAGE_SIZE);
       *(.data.page_aligned)
       *(.data)
       *(.data.rel)
} :text
.bss : { /* BSS */
-       . = ALIGN(STACK_SIZE);
       __bss_start = .;
       *(.bss.stack_aligned)
-       . = ALIGN(PAGE_SIZE);
       *(.bss.page_aligned*)
       *(.bss)
       . = ALIGN(SMP_CACHE_BYTES);
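The hunks above shift the alignment guarantee from the linker script onto the objects themselves: each page- or stack-aligned symbol now carries an explicit __aligned() attribute, and the linker raises the alignment of the containing section to satisfy it, which is why the ALIGN() directives before the corresponding input sections can be dropped. A minimal, self-contained sketch of that mechanism follows; everything in it, including the demo_ names and the hard-coded 4096-byte page size, is illustrative and not taken from the patch.

/* Illustration only, not part of the patch. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096   /* stand-in for Xen's PAGE_SIZE */
#define __demo_aligned(a) __attribute__((__aligned__(a)))

/* The attribute on the object itself is what guarantees the alignment;
 * no ALIGN() directive in any linker script is involved. */
static char __demo_aligned(DEMO_PAGE_SIZE) demo_page[DEMO_PAGE_SIZE];

int main(void)
{
    /* demo_page's address is a multiple of DEMO_PAGE_SIZE. */
    assert(((uintptr_t)demo_page & (DEMO_PAGE_SIZE - 1)) == 0);
    printf("demo_page at %p\n", (void *)demo_page);
    return 0;
}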
#define __used_section(s) __used __attribute__((__section__(s)))
#define __text_section(s) __attribute__((__section__(s)))
+#define __aligned(a) __attribute__((__aligned__(a)))
+
#ifdef INIT_SECTIONS_ONLY
/*
* For sources indicated to have only init code, make sure even