Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/boot/compressed/decompress.c  |  1
-rw-r--r--  arch/arm/include/asm/elf.h             |  8
-rw-r--r--  arch/arm/kernel/atags_parse.c          |  3
-rw-r--r--  arch/arm64/include/asm/elf.h           | 12
-rw-r--r--  arch/arm64/mm/kasan_init.c             |  8
-rw-r--r--  arch/frv/include/asm/Kbuild            |  2
-rw-r--r--  arch/frv/include/asm/cmpxchg.h         |  1
-rw-r--r--  arch/frv/include/asm/device.h          |  7
-rw-r--r--  arch/frv/include/asm/fb.h              | 12
-rw-r--r--  arch/mips/kernel/module.c              |  3
-rw-r--r--  arch/mips/kernel/traps.c               |  3
-rw-r--r--  arch/powerpc/include/asm/elf.h         | 13
-rw-r--r--  arch/s390/include/asm/elf.h            | 15
-rw-r--r--  arch/sh/mm/extable_64.c                | 34
-rw-r--r--  arch/sparc/mm/extable.c                | 28
-rw-r--r--  arch/x86/include/asm/elf.h             | 13
-rw-r--r--  arch/x86/mm/kasan_init_64.c            |  7
17 files changed, 72 insertions(+), 98 deletions(-)
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
index ea7832702a8f..f3a4bedd1afc 100644
--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -33,6 +33,7 @@ extern void error(char *);
/* Not needed, but used in some headers pulled in by decompressors */
extern char * strstr(const char * s1, const char *s2);
extern size_t strlen(const char *s);
+extern int memcmp(const void *cs, const void *ct, size_t count);
#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index d2315ffd8f12..f13ae153fb24 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+/* This is the base location for PIE (ET_DYN with INTERP) loads. */
+#define ELF_ET_DYN_BASE 0x400000UL
/* When the program starts, a1 contains a pointer to a function to be
registered with atexit, as per the SVR4 ABI. A value of 0 means we
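
Across the ELF header changes in this series (arm here, then arm64, powerpc, s390, and x86 below), the historical (TASK_SIZE / 3 * 2) formula is replaced by a low fixed base for 32-bit tasks and 4GB for 64-bit ones, since ELF_ET_DYN_BASE now only governs where PIE (ET_DYN with INTERP) binaries are mapped. As a hedged illustration of what the constant controls (not the in-tree fs/binfmt_elf.c logic), the sketch below derives a page-aligned load base from it; the helper name and the ASLR offset are made up for the example.

/* Hedged sketch: how a loader could turn ELF_ET_DYN_BASE plus an ASLR
 * offset into a page-aligned mmap base for a PIE executable. The helper
 * name and the offset value are illustrative only. */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define ELF_ET_DYN_BASE		0x400000UL	/* new fixed 32-bit base */

static unsigned long pie_mmap_base(unsigned long aslr_offset)
{
	/* Round down so the first PT_LOAD segment starts on a page boundary. */
	return (ELF_ET_DYN_BASE + aslr_offset) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	printf("PIE base, no ASLR:     %#lx\n", pie_mmap_base(0));
	printf("PIE base, some offset: %#lx\n", pie_mmap_base(0x1a2000));
	return 0;
}
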
diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c
index 68c6ae0b9e4c..98fbfd235ac8 100644
--- a/arch/arm/kernel/atags_parse.c
+++ b/arch/arm/kernel/atags_parse.c
@@ -18,6 +18,7 @@
*/
#include <linux/init.h>
+#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/root_dev.h>
@@ -91,8 +92,6 @@ __tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#ifdef CONFIG_BLK_DEV_RAM
static int __init parse_tag_ramdisk(const struct tag *tag)
{
- extern int rd_size, rd_image_start, rd_prompt, rd_doload;
-
rd_image_start = tag->u.ramdisk.start;
rd_doload = (tag->u.ramdisk.flags & 1) == 0;
rd_prompt = (tag->u.ramdisk.flags & 2) == 0;
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index ac3fb7441510..acae781f7359 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -113,12 +113,11 @@
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
- * This is the location that an ET_DYN program is loaded if exec'ed. Typical
- * use of this is to invoke "./ld.so someprog" to test out a new version of
- * the loader. We need to make sure that it is out of the way of the program
- * that it will "exec", and that there is sufficient room for the brk.
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
*/
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
+#define ELF_ET_DYN_BASE 0x100000000UL
#ifndef __ASSEMBLY__
@@ -174,7 +173,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
#ifdef CONFIG_COMPAT
-#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
+/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
+#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL
/* AArch32 registers. */
#define COMPAT_ELF_NGREG 18
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 687a358a3733..81f03959a4ab 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -191,14 +191,8 @@ void __init kasan_init(void)
if (start >= end)
break;
- /*
- * end + 1 here is intentional. We check several shadow bytes in
- * advance to slightly speed up fastpath. In some rare cases
- * we could cross boundary of mapped shadow, so we just map
- * some more here.
- */
vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
- (unsigned long)kasan_mem_to_shadow(end) + 1,
+ (unsigned long)kasan_mem_to_shadow(end),
pfn_to_nid(virt_to_pfn(start)));
}
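
Both KASAN hunks in this series (here and in arch/x86/mm/kasan_init_64.c further down) stop mapping shadow one byte past the end of the region. For background, generic KASAN maps every 8 bytes of memory onto 1 shadow byte; the sketch below shows that address arithmetic with an illustrative shadow offset, not arm64's real KASAN_SHADOW_OFFSET.

/* Hedged sketch of the generic KASAN memory-to-shadow mapping: one shadow
 * byte covers 8 bytes of memory. SHADOW_OFFSET is an arbitrary example
 * value, not the arm64 constant. */
#include <stdio.h>

#define SHADOW_SCALE_SHIFT	3
#define SHADOW_OFFSET		0xdffffc0000000000UL	/* illustrative */

static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> SHADOW_SCALE_SHIFT) + SHADOW_OFFSET;
}

int main(void)
{
	unsigned long start = 0xffff800000000000UL;
	unsigned long end = start + (1UL << 30);	/* a 1 GiB region */

	/* With this patch the shadow is populated for exactly [start, end). */
	printf("shadow of region: %#lx - %#lx\n",
	       mem_to_shadow(start), mem_to_shadow(end));
	return 0;
}
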
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index cce3bc3603ea..2cf7648787b2 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -1,7 +1,9 @@
generic-y += clkdev.h
+generic-y += device.h
generic-y += exec.h
generic-y += extable.h
+generic-y += fb.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
diff --git a/arch/frv/include/asm/cmpxchg.h b/arch/frv/include/asm/cmpxchg.h
index a899765102ea..ad1f11cfa92a 100644
--- a/arch/frv/include/asm/cmpxchg.h
+++ b/arch/frv/include/asm/cmpxchg.h
@@ -76,6 +76,7 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
* - if (*ptr != test) then orig = *ptr;
*/
extern uint64_t __cmpxchg_64(uint64_t test, uint64_t new, volatile uint64_t *v);
+#define cmpxchg64(p, o, n) __cmpxchg_64((o), (n), (p))
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
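
The new cmpxchg64() macro only reorders arguments: the generic cmpxchg64(ptr, old, new) convention is mapped onto the existing __cmpxchg_64(old, new, ptr) helper. A hedged userspace sketch of that mapping follows, with a trivial stand-in for the helper (the real one is out-of-line frv code):

#include <stdint.h>

/* Stand-in for frv's out-of-line helper, just to exercise the macro. */
static uint64_t __cmpxchg_64(uint64_t test, uint64_t new, volatile uint64_t *v)
{
	uint64_t orig = *v;

	if (orig == test)
		*v = new;
	return orig;
}

#define cmpxchg64(p, o, n) __cmpxchg_64((o), (n), (p))

int main(void)
{
	volatile uint64_t counter = 1;

	/* Succeeds only while counter is still 1; returns the prior value. */
	uint64_t old = cmpxchg64(&counter, 1, 2);

	return (old == 1 && counter == 2) ? 0 : 1;
}
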
diff --git a/arch/frv/include/asm/device.h b/arch/frv/include/asm/device.h
deleted file mode 100644
index d8f9872b0e2d..000000000000
--- a/arch/frv/include/asm/device.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#include <asm-generic/device.h>
-
diff --git a/arch/frv/include/asm/fb.h b/arch/frv/include/asm/fb.h
deleted file mode 100644
index c7df38030992..000000000000
--- a/arch/frv/include/asm/fb.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-#include <linux/fb.h>
-
-#define fb_pgprotect(...) do {} while (0)
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
- return 0;
-}
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 94627a3a6a0d..50c020c47e54 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -317,7 +317,8 @@ const struct exception_table_entry *search_module_dbetables(unsigned long addr)
spin_lock_irqsave(&dbe_lock, flags);
list_for_each_entry(dbe, &dbe_list, dbe_list) {
- e = search_extable(dbe->dbe_start, dbe->dbe_end - 1, addr);
+ e = search_extable(dbe->dbe_start,
+ dbe->dbe_end - dbe->dbe_start, addr);
if (e)
break;
}
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 38dfa27730ff..b68b4d0726d3 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -429,7 +429,8 @@ static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
const struct exception_table_entry *e;
- e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
+ e = search_extable(__start___dbe_table,
+ __stop___dbe_table - __start___dbe_table, addr);
if (!e)
e = search_module_dbetables(addr);
return e;
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 09bde6e34f5d..548d9a411a0d 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -23,12 +23,13 @@
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE 0x20000000
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (is_32bit_task() ? 0x000400000UL : \
+ 0x100000000UL)
#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index ec024c08dabe..c92ed0170be2 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -193,14 +193,13 @@ struct arch_elf_state {
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. 64-bit
- tasks are aligned to 4GB. */
-#define ELF_ET_DYN_BASE (is_compat_task() ? \
- (STACK_TOP / 3 * 2) : \
- (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \
+ 0x100000000UL)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
diff --git a/arch/sh/mm/extable_64.c b/arch/sh/mm/extable_64.c
index b90cdfad2c78..7a3b4d33d2e7 100644
--- a/arch/sh/mm/extable_64.c
+++ b/arch/sh/mm/extable_64.c
@@ -10,6 +10,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
+#include <linux/bsearch.h>
#include <linux/rwsem.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
@@ -40,10 +41,23 @@ static const struct exception_table_entry *check_exception_ranges(unsigned long
return NULL;
}
+static int cmp_ex_search(const void *key, const void *elt)
+{
+ const struct exception_table_entry *_elt = elt;
+ unsigned long _key = *(unsigned long *)key;
+
+ /* avoid overflow */
+ if (_key > _elt->insn)
+ return 1;
+ if (_key < _elt->insn)
+ return -1;
+ return 0;
+}
+
/* Simple binary search */
const struct exception_table_entry *
-search_extable(const struct exception_table_entry *first,
- const struct exception_table_entry *last,
+search_extable(const struct exception_table_entry *base,
+ const size_t num,
unsigned long value)
{
const struct exception_table_entry *mid;
@@ -52,20 +66,8 @@ search_extable(const struct exception_table_entry *first,
if (mid)
return mid;
- while (first <= last) {
- long diff;
-
- mid = (last - first) / 2 + first;
- diff = mid->insn - value;
- if (diff == 0)
- return mid;
- else if (diff < 0)
- first = mid+1;
- else
- last = mid-1;
- }
-
- return NULL;
+ return bsearch(&value, base, num,
+ sizeof(struct exception_table_entry), cmp_ex_search);
}
int fixup_exception(struct pt_regs *regs)
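
The sh, mips, and sparc changes share one API shift: search_extable() now takes a table base and an element count rather than first/last pointers, which lets sh delegate the sorted lookup to bsearch() via the cmp_ex_search() comparator above. A hedged userspace rendering of the same pattern, using the C library bsearch() and a stand-in entry type (the kernel version goes through lib/bsearch.c):

#include <stdio.h>
#include <stdlib.h>

struct ex_entry {
	unsigned long insn;
	unsigned long fixup;
};

/* Same shape as cmp_ex_search(): compare a faulting address to one entry. */
static int cmp_ex(const void *key, const void *elt)
{
	unsigned long addr = *(const unsigned long *)key;
	const struct ex_entry *e = elt;

	if (addr > e->insn)
		return 1;
	if (addr < e->insn)
		return -1;
	return 0;
}

int main(void)
{
	/* The table must already be sorted by ->insn for bsearch() to work. */
	struct ex_entry table[] = {
		{ 0x1000, 0x9000 }, { 0x2000, 0x9100 }, { 0x3000, 0x9200 },
	};
	unsigned long addr = 0x2000;
	struct ex_entry *hit = bsearch(&addr, table,
				       sizeof(table) / sizeof(table[0]),
				       sizeof(table[0]), cmp_ex);

	printf("fixup = %#lx\n", hit ? hit->fixup : 0UL);
	return 0;
}
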
diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c
index db214e9931d9..2422511dc8c5 100644
--- a/arch/sparc/mm/extable.c
+++ b/arch/sparc/mm/extable.c
@@ -13,11 +13,11 @@ void sort_extable(struct exception_table_entry *start,
/* Caller knows they are in a range if ret->fixup == 0 */
const struct exception_table_entry *
-search_extable(const struct exception_table_entry *start,
- const struct exception_table_entry *last,
+search_extable(const struct exception_table_entry *base,
+ const size_t num,
unsigned long value)
{
- const struct exception_table_entry *walk;
+ int i;
/* Single insn entries are encoded as:
* word 1: insn address
@@ -37,30 +37,30 @@ search_extable(const struct exception_table_entry *start,
*/
/* 1. Try to find an exact match. */
- for (walk = start; walk <= last; walk++) {
- if (walk->fixup == 0) {
+ for (i = 0; i < num; i++) {
+ if (base[i].fixup == 0) {
/* A range entry, skip both parts. */
- walk++;
+ i++;
continue;
}
/* A deleted entry; see trim_init_extable */
- if (walk->fixup == -1)
+ if (base[i].fixup == -1)
continue;
- if (walk->insn == value)
- return walk;
+ if (base[i].insn == value)
+ return &base[i];
}
/* 2. Try to find a range match. */
- for (walk = start; walk <= (last - 1); walk++) {
- if (walk->fixup)
+ for (i = 0; i < (num - 1); i++) {
+ if (base[i].fixup)
continue;
- if (walk[0].insn <= value && walk[1].insn > value)
- return walk;
+ if (base[i].insn <= value && base[i + 1].insn > value)
+ return &base[i];
- walk++;
+ i++;
}
return NULL;
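
The sparc walk keeps its two-pass structure on top of the new (base, num) interface: pass one matches single-insn entries (non-zero fixup), pass two matches range entries, which occupy two consecutive slots with fixup == 0 in the first word and the range end in the second. A hedged userspace sketch of that walk over an illustrative table (the deleted-entry case is omitted for brevity):

#include <stddef.h>
#include <stdio.h>

struct ex_entry {
	unsigned long insn;
	unsigned long fixup;	/* 0 marks the first word of a range entry */
};

static const struct ex_entry *search(const struct ex_entry *base, size_t num,
				     unsigned long value)
{
	size_t i;

	/* Pass 1: exact single-insn matches, skipping range pairs. */
	for (i = 0; i < num; i++) {
		if (base[i].fixup == 0) {
			i++;	/* skip both words of the range entry */
			continue;
		}
		if (base[i].insn == value)
			return &base[i];
	}

	/* Pass 2: range entries cover [insn, next insn). */
	for (i = 0; i + 1 < num; i++) {
		if (base[i].fixup)
			continue;
		if (base[i].insn <= value && base[i + 1].insn > value)
			return &base[i];
		i++;
	}
	return NULL;
}

int main(void)
{
	const struct ex_entry table[] = {
		{ 0x1000, 0x9000 },			/* single-insn entry */
		{ 0x2000, 0 }, { 0x2400, 0x9100 },	/* range [0x2000, 0x2400) */
	};

	const struct ex_entry *e = search(table, 3, 0x2100);

	printf("%s\n", e && e->fixup == 0 ? "range hit" : "miss");
	return 0;
}
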
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index e8ab9a46bc68..1c18d83d3f09 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -245,12 +245,13 @@ extern int force_personality32;
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
+ 0x100000000UL)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 88215ac16b24..02c9d7553409 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -23,12 +23,7 @@ static int __init map_range(struct range *range)
start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
- /*
- * end + 1 here is intentional. We check several shadow bytes in advance
- * to slightly speed up fastpath. In some rare cases we could cross
- * boundary of mapped shadow, so we just map some more here.
- */
- return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
+ return vmemmap_populate(start, end, NUMA_NO_NODE);
}
static void __init clear_pgds(unsigned long start,