#define L1_CACHE_SHIFT (CONFIG_ARM_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __section(".data.read_mostly")
#endif
/* Separate out the type, so (int[3], foo) works. */
#define __DEFINE_PER_CPU(type, name, suffix) \
- __attribute__((__section__(".bss.percpu" #suffix))) \
+ __section(".bss.percpu" #suffix) \
__typeof__(type) per_cpu_##name
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __section(".data.read_mostly")
#endif
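
/*
 * Illustrative sketch, not part of the patch: __read_mostly gathers
 * rarely-written variables into .data.read_mostly so they do not share
 * cache lines with frequently-written data (avoiding false sharing).
 * A hypothetical user:
 *
 *   static unsigned int opt_example __read_mostly = 1;
 *
 * which with the new helper still expands to
 *
 *   static unsigned int opt_example
 *       __attribute__((__section__(".data.read_mostly"))) = 1;
 */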
/* Separate out the type, so (int[3], foo) works. */
#define __DEFINE_PER_CPU(type, name, suffix) \
- __attribute__((__section__(".bss.percpu" #suffix))) \
+ __section(".bss.percpu" #suffix) \
__typeof__(type) per_cpu_##name
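
/*
 * Illustrative sketch, not part of the patch: Xen's DEFINE_PER_CPU()
 * wrapper passes an empty suffix, so a hypothetical
 *
 *   __DEFINE_PER_CPU(unsigned long, irq_count, );
 *
 * now expands (after string concatenation of ".bss.percpu" "") to
 *
 *   __attribute__((__section__(".bss.percpu")))
 *   __typeof__(unsigned long) per_cpu_irq_count;
 *
 * One master copy lives in .bss.percpu; each CPU accesses its own
 * copy at a fixed per-CPU offset from it.
 */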
/* var is in discarded region: offset to particular copy we want */
#define always_inline __inline__ __attribute__ ((always_inline))
#define noinline __attribute__((noinline))
+#define __section(s) __attribute__((__section__(s)))
+#define __used_section(s) __attribute_used__ __attribute__((__section__(s)))
+#define __text_section(s) __attribute__((__section__(s)))
+
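/*
 * Illustrative note, not part of the patch: the helpers are plain
 * shorthands, so e.g. a hypothetical
 *
 *   int foo __used_section(".exit.data");
 *
 * expands to
 *
 *   int foo __attribute_used__ __attribute__((__section__(".exit.data")));
 *
 * __used_section additionally marks the object as used so the compiler
 * cannot discard it as unreferenced; __text_section currently expands
 * identically to __section and merely documents that the target is a
 * code section.
 */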
#ifdef INIT_SECTIONS_ONLY
/*
 * For sources indicated to have only init code, make sure even
 * inline functions not expanded inline get placed in .init.text.
 */

/*
 * Mark functions and data as being only used at initialization
 * or exit time.
 */
-#define __init \
- __attribute__ ((__section__ (".init.text")))
-#define __exit \
- __attribute_used__ __attribute__ ((__section__(".exit.text")))
-#define __initdata \
- __attribute__ ((__section__ (".init.data")))
-#define __exitdata \
- __attribute_used__ __attribute__ ((__section__ (".exit.data")))
-#define __initsetup \
- __attribute_used__ __attribute__ ((__section__ (".init.setup")))
-#define __init_call(lvl) \
- __attribute_used__ __attribute__ ((__section__ (".initcall" lvl ".init")))
-#define __exit_call \
- __attribute_used__ __attribute__ ((__section__ (".exitcall.exit")))
+#define __init __text_section(".init.text")
+#define __exit __text_section(".exit.text")
+#define __initdata __section(".init.data")
+#define __exitdata __used_section(".exit.data")
+#define __initsetup __used_section(".init.setup")
+#define __init_call(lvl) __used_section(".initcall" lvl ".init")
+#define __exit_call __used_section(".exitcall.exit")
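
/*
 * Illustrative sketch, not part of the patch: a boot-time-only
 * function and its data (names are hypothetical) would be written as
 *
 *   static int __initdata setup_done;
 *
 *   static int __init do_setup(void)
 *   {
 *       setup_done = 1;
 *       return 0;
 *   }
 *
 * Both land in .init.* sections, which Xen frees once boot completes.
 */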
/* These macros are used to mark some functions or
 * initialized data (doesn't apply to uninitialized data)
 * as `initialization' functions.
 */
extern struct kernel_param __setup_start, __setup_end;
#define __setup_str static __initdata __attribute__((__aligned__(1))) char
-#define __kparam static __attribute_used__ __initsetup struct kernel_param
+#define __kparam static __initsetup struct kernel_param
#define custom_param(_name, _var) \
__setup_str __setup_str_##_var[] = _name; \
#define _LOCK_PROFILE(name) { 0, #name, &name, 0, 0, 0, 0, 0 }
#define _LOCK_PROFILE_PTR(name) \
- static struct lock_profile *__lock_profile_##name __attribute_used__ \
- __attribute__ ((__section__(".lockprofile.data"))) = \
+ static struct lock_profile *__lock_profile_##name \
+ __used_section(".lockprofile.data") = \
&__lock_profile_data_##name
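
/*
 * Illustrative note, not part of the patch: _LOCK_PROFILE_PTR plants a
 * pointer to each lock's profile record in .lockprofile.data; keeping
 * the used attribute (now via __used_section) matters because nothing
 * references these pointers at compile time. The profiling code can
 * then walk the section as a pointer array, e.g. (boundary symbol
 * names here are assumptions):
 *
 *   extern struct lock_profile *__lock_profile_start[],
 *                              *__lock_profile_end[];
 *   for ( struct lock_profile **p = __lock_profile_start;
 *         p < __lock_profile_end; p++ )
 *       process(*p);
 */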
#define _SPIN_LOCK_UNLOCKED(x) { _RAW_SPIN_LOCK_UNLOCKED, 0xfffu, 0, \
_LOCK_DEBUG, x }
#define xsm_initcall(fn) \
static xsm_initcall_t __initcall_##fn \
- __attribute_used__ __attribute__((__section__(".xsm_initcall.init"))) = fn
+ __used_section(".xsm_initcall.init") = fn
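
/*
 * Illustrative note, not part of the patch: xsm_initcall() registers a
 * function pointer in .xsm_initcall.init, and XSM setup can invoke
 * every registered hook at boot by walking that section (boundary
 * symbol names here are assumptions):
 *
 *   extern xsm_initcall_t __xsm_initcall_start[], __xsm_initcall_end[];
 *   for ( xsm_initcall_t *call = __xsm_initcall_start;
 *         call < __xsm_initcall_end; call++ )
 *       (*call)();
 */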
struct xsm_operations {
void (*security_domaininfo) (struct domain *d,