Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/percpu.h      | 10
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 38
-rw-r--r--  include/linux/cache.h             |  2
-rw-r--r--  include/linux/init.h              |  2
-rw-r--r--  include/linux/init_task.h         |  2
-rw-r--r--  include/linux/linkage.h           |  8
-rw-r--r--  include/linux/percpu-defs.h       |  4
-rw-r--r--  include/linux/spinlock.h          |  2
8 files changed, 34 insertions, 34 deletions
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 04f91c2d3f7b..b5043a9890d8 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -80,7 +80,7 @@ extern void setup_per_cpu_areas(void);
 
 #ifndef PER_CPU_BASE_SECTION
 #ifdef CONFIG_SMP
-#define PER_CPU_BASE_SECTION ".data.percpu"
+#define PER_CPU_BASE_SECTION ".data..percpu"
 #else
 #define PER_CPU_BASE_SECTION ".data"
 #endif
@@ -92,15 +92,15 @@ extern void setup_per_cpu_areas(void);
 #define PER_CPU_SHARED_ALIGNED_SECTION ""
 #define PER_CPU_ALIGNED_SECTION ""
 #else
-#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
-#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
 #endif
-#define PER_CPU_FIRST_SECTION ".first"
+#define PER_CPU_FIRST_SECTION "..first"
 
 #else
 
 #define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
 #define PER_CPU_FIRST_SECTION ""
 
 #endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ef779c6fc3d7..48c5299cbf26 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -175,25 +175,25 @@
 #define NOSAVE_DATA \
         . = ALIGN(PAGE_SIZE); \
         VMLINUX_SYMBOL(__nosave_begin) = .; \
-        *(.data.nosave) \
+        *(.data..nosave) \
         . = ALIGN(PAGE_SIZE); \
         VMLINUX_SYMBOL(__nosave_end) = .;
 
 #define PAGE_ALIGNED_DATA(page_align) \
         . = ALIGN(page_align); \
-        *(.data.page_aligned)
+        *(.data..page_aligned)
 
 #define READ_MOSTLY_DATA(align) \
         . = ALIGN(align); \
-        *(.data.read_mostly)
+        *(.data..read_mostly)
 
 #define CACHELINE_ALIGNED_DATA(align) \
         . = ALIGN(align); \
-        *(.data.cacheline_aligned)
+        *(.data..cacheline_aligned)
 
 #define INIT_TASK_DATA(align) \
         . = ALIGN(align); \
-        *(.data.init_task)
+        *(.data..init_task)
 
 /*
  * Read only Data
@@ -435,7 +435,7 @@
  */
 #define INIT_TASK_DATA_SECTION(align) \
         . = ALIGN(align); \
-        .data.init_task : { \
+        .data..init_task : { \
                 INIT_TASK_DATA(align) \
         }
 
@@ -499,7 +499,7 @@
 #define BSS(bss_align) \
         . = ALIGN(bss_align); \
         .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
-                *(.bss.page_aligned) \
+                *(.bss..page_aligned) \
                 *(.dynbss) \
                 *(.bss) \
                 *(COMMON) \
@@ -666,16 +666,16 @@
  */
 #define PERCPU_VADDR(vaddr, phdr) \
         VMLINUX_SYMBOL(__per_cpu_load) = .; \
-        .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
+        .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
                                 - LOAD_OFFSET) { \
                 VMLINUX_SYMBOL(__per_cpu_start) = .; \
-                *(.data.percpu.first) \
-                *(.data.percpu.page_aligned) \
-                *(.data.percpu) \
-                *(.data.percpu.shared_aligned) \
+                *(.data..percpu..first) \
+                *(.data..percpu..page_aligned) \
+                *(.data..percpu) \
+                *(.data..percpu..shared_aligned) \
                 VMLINUX_SYMBOL(__per_cpu_end) = .; \
         } phdr \
-        . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
+        . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
 
 /**
  * PERCPU - define output section for percpu area, simple version
@@ -687,18 +687,18 @@
  *
  * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
  * that __per_cpu_load is defined as a relative symbol against
- * .data.percpu which is required for relocatable x86_32
+ * .data..percpu which is required for relocatable x86_32
  * configuration.
  */
 #define PERCPU(align) \
         . = ALIGN(align); \
-        .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+        .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
                 VMLINUX_SYMBOL(__per_cpu_load) = .; \
                 VMLINUX_SYMBOL(__per_cpu_start) = .; \
-                *(.data.percpu.first) \
-                *(.data.percpu.page_aligned) \
-                *(.data.percpu) \
-                *(.data.percpu.shared_aligned) \
+                *(.data..percpu..first) \
+                *(.data..percpu..page_aligned) \
+                *(.data..percpu) \
+                *(.data..percpu..shared_aligned) \
                 VMLINUX_SYMBOL(__per_cpu_end) = .; \
         }
 
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 97e24881c4c6..4c570653ab84 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -31,7 +31,7 @@
 #ifndef __cacheline_aligned
 #define __cacheline_aligned \
         __attribute__((__aligned__(SMP_CACHE_BYTES), \
-                __section__(".data.cacheline_aligned")))
+                __section__(".data..cacheline_aligned")))
 #endif /* __cacheline_aligned */
 
 #ifndef __cacheline_aligned_in_smp
diff --git a/include/linux/init.h b/include/linux/init.h
index ab1d31f9352b..de994304e0bb 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -301,7 +301,7 @@ void __init parse_early_options(char *cmdline);
 #endif
 
 /* Data marked not to be saved by software suspend */
-#define __nosavedata __section(.data.nosave)
+#define __nosavedata __section(.data..nosave)
 
 /* This means "can be init if no module support, otherwise module
    load may call it." */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 2beaa13492be..1f43fa56f600 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -183,7 +183,7 @@ extern struct cred init_cred;
 }
 
 /* Attach to the init_task data structure for proper alignment */
-#define __init_task_data __attribute__((__section__(".data.init_task")))
+#define __init_task_data __attribute__((__section__(".data..init_task")))
 
 #endif
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 5126cceb6ae9..7135ebc8428c 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -18,8 +18,8 @@
 # define asmregparm
 #endif
 
-#define __page_aligned_data __section(.data.page_aligned) __aligned(PAGE_SIZE)
-#define __page_aligned_bss __section(.bss.page_aligned) __aligned(PAGE_SIZE)
+#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
+#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
 
 /*
  * For assembly routines.
@@ -27,8 +27,8 @@
  * Note when using these that you must specify the appropriate
  * alignment directives yourself
  */
-#define __PAGE_ALIGNED_DATA .section ".data.page_aligned", "aw"
-#define __PAGE_ALIGNED_BSS .section ".bss.page_aligned", "aw"
+#define __PAGE_ALIGNED_DATA .section ".data..page_aligned", "aw"
+#define __PAGE_ALIGNED_BSS .section ".bss..page_aligned", "aw"
 
 /*
  * This is used by architectures to keep arguments on the stack
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 68567c0b3a5d..ce2dc655cd1d 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -131,11 +131,11 @@
  * Declaration/definition used for per-CPU variables that must be page aligned.
  */
 #define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \
-        DECLARE_PER_CPU_SECTION(type, name, ".page_aligned") \
+        DECLARE_PER_CPU_SECTION(type, name, "..page_aligned") \
         __aligned(PAGE_SIZE)
 
 #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
-        DEFINE_PER_CPU_SECTION(type, name, ".page_aligned") \
+        DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \
         __aligned(PAGE_SIZE)
 
 /*
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 89fac6a3f78b..f8854655860e 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -60,7 +60,7 @@
 /*
  * Must define these before including other files, inline functions need them
  */
-#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME
+#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
 
 #define LOCK_SECTION_START(extra) \
         ".subsection 1\n\t" \
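For context, a minimal sketch (not part of the patch) of how the renamed sections are reached from C code. The variable and struct names below are hypothetical; the annotations themselves (DEFINE_PER_CPU_PAGE_ALIGNED, __cacheline_aligned, __nosavedata, __page_aligned_bss) are the existing kernel APIs whose underlying section names this patch changes. Callers stay the same; only the emitted ELF section names gain the ".." separator, which keeps them distinct from the ".data.<symbol>"-style sections that gcc's -ffunction-sections/-fdata-sections generate.

/* Hypothetical example, not from the patch: the C-level annotations are
 * unchanged, only the ELF sections they expand to are renamed. */
#include <linux/percpu.h>       /* DEFINE_PER_CPU_PAGE_ALIGNED() */
#include <linux/cache.h>        /* __cacheline_aligned */
#include <linux/init.h>         /* __nosavedata */
#include <linux/linkage.h>      /* __page_aligned_bss */
#include <asm/page.h>           /* PAGE_SIZE */

struct demo_stats {
        unsigned long hits;
        unsigned long misses;
};

/* Ends up in .data..percpu..page_aligned (was .data.percpu.page_aligned). */
static DEFINE_PER_CPU_PAGE_ALIGNED(struct demo_stats, demo_pcpu_stats);

/* Ends up in .data..cacheline_aligned (was .data.cacheline_aligned) on SMP builds. */
static struct demo_stats demo_global_stats __cacheline_aligned;

/* Ends up in .data..nosave (was .data.nosave); not restored by hibernation. */
static unsigned long demo_snapshot __nosavedata;

/* Ends up in .bss..page_aligned (was .bss.page_aligned). */
static char demo_bounce_page[PAGE_SIZE] __page_aligned_bss;

On a kernel built with this series applied, something like "objdump -h vmlinux | grep 'data\.\.'" should list the double-dot section names.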