From dc4fbba11e4661a6a77a1f89ba32f9082e6395ff Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Thu, 29 Oct 2015 11:44:05 +1100
Subject: powerpc: Create disable_kernel_{fp,altivec,vsx,spe}()

The enable_kernel_*() functions leave the relevant MSR bits enabled
until we exit the kernel sometime later. Create disable versions that
wrap the kernel use of FP, Altivec, VSX or SPE.

While we don't want to disable them normally for performance reasons
(MSR writes are slow), the disable calls will be used by a debug boot
option that does so and catches bad uses in other areas of the kernel.

Signed-off-by: Anton Blanchard
Signed-off-by: Michael Ellerman
---
 lib/raid6/altivec.uc | 1 +
 1 file changed, 1 insertion(+)

(limited to 'lib')

diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc
index bec27fce7501..682aae8a1fef 100644
--- a/lib/raid6/altivec.uc
+++ b/lib/raid6/altivec.uc
@@ -101,6 +101,7 @@ static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs)

 	raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs);

+	disable_kernel_altivec();
 	preempt_enable();
 }
--
cgit v1.2.3
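For context, a kernel user of Altivec now brackets its vector code the way the raid6 hunk above does. The sketch below is a hypothetical illustration of that pattern, not code from this series: vmx_copy() and do_vmx_work() are made-up names, and the declarations for enable_kernel_altivec()/disable_kernel_altivec() are assumed to come from asm/switch_to.h on powerpc.

#include <linux/types.h>
#include <linux/preempt.h>
#include <asm/switch_to.h>	/* assumed home of {enable,disable}_kernel_altivec() */

static void do_vmx_work(void *dst, const void *src, size_t bytes);	/* hypothetical VMX routine */

static void vmx_copy(void *dst, const void *src, size_t bytes)
{
	preempt_disable();
	enable_kernel_altivec();	/* grant this kernel thread VMX access */

	do_vmx_work(dst, src, bytes);	/* all Altivec use stays inside this window */

	disable_kernel_altivec();	/* new: drop MSR_VEC again so stray use can be caught */
	preempt_enable();
}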
From 7f7e92f7553c663fc1c53f999b0bf88e2d1f30a7 Mon Sep 17 00:00:00 2001
From: Geert Uytterhoeven
Date: Fri, 4 Sep 2015 12:45:05 +0200
Subject: lib: scatterlist: fix Kconfig description

Spelling s/heler/helper/, grammar s/channel/channels/

Signed-off-by: Geert Uytterhoeven
Signed-off-by: Jiri Kosina
---
 lib/Kconfig | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'lib')

diff --git a/lib/Kconfig b/lib/Kconfig
index f0df318104e7..5a0c1c83cdf0 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -512,9 +512,9 @@ source "lib/fonts/Kconfig"
 config SG_SPLIT
 	def_bool n
 	help
-	 Provides a heler to split scatterlists into chunks, each chunk being a
-	 scatterlist. This should be selected by a driver or an API which
-	 whishes to split a scatterlist amongst multiple DMA channel.
+	 Provides a helper to split scatterlists into chunks, each chunk being
+	 a scatterlist. This should be selected by a driver or an API which
+	 whishes to split a scatterlist amongst multiple DMA channels.

 #
 # sg chaining option
--
cgit v1.2.3
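SG_SPLIT builds the sg_split() helper that this Kconfig text describes. The sketch below shows roughly how a caller might divide one mapped scatterlist between two DMA channels; it is an illustration only, the surrounding function is invented, and the sg_split() prototype assumed here (in, in_mapped_nents, skip, nb_splits, split_sizes, out, out_mapped_nents, gfp) should be double-checked against include/linux/scatterlist.h.

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical caller: split one mapped scatterlist across two DMA channels. */
static int split_for_two_channels(struct scatterlist *sgl, int mapped_nents,
				  size_t total_len)
{
	const size_t split_sizes[2] = { total_len / 2, total_len - total_len / 2 };
	struct scatterlist *out[2];
	int out_nents[2];
	int ret;

	/* sg_split() is assumed to allocate the two result tables with gfp_mask. */
	ret = sg_split(sgl, mapped_nents, 0 /* skip */, 2 /* nb_splits */,
		       split_sizes, out, out_nents, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... hand out[0]/out[1] to the two channels, then release the tables ... */
	kfree(out[0]);
	kfree(out[1]);
	return 0;
}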
From de2dd0eb30af55d3893979d5641c50c7a8969c99 Mon Sep 17 00:00:00 2001
From: Zhao Qiang
Date: Mon, 30 Nov 2015 10:48:52 +0800
Subject: genalloc: support memory allocation with bytes alignment

Byte alignment is required to manage some special RAM, so add
gen_pool_first_fit_align() to genalloc. Also add gen_pool_alloc_algo(),
which lets a caller pass the algorithm (and its data) explicitly when it
uses more than one algorithm, and turn gen_pool_alloc() into a wrapper
around it.

Signed-off-by: Zhao Qiang
Signed-off-by: Scott Wood
---
 lib/genalloc.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 55 insertions(+), 6 deletions(-)

(limited to 'lib')

diff --git a/lib/genalloc.c b/lib/genalloc.c
index 116a166b096f..b8cf89d9e17d 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -269,6 +269,25 @@ EXPORT_SYMBOL(gen_pool_destroy);
  * NMI-safe cmpxchg implementation.
  */
 unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
+{
+	return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
+}
+EXPORT_SYMBOL(gen_pool_alloc);
+
+/**
+ * gen_pool_alloc_algo - allocate special memory from the pool
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @algo: algorithm passed from caller
+ * @data: data passed to algorithm
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ */
+unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
+		genpool_algo_t algo, void *data)
 {
 	struct gen_pool_chunk *chunk;
 	unsigned long addr = 0;
@@ -290,8 +309,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 		end_bit = chunk_size(chunk) >> order;
 retry:
-		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
-				pool->data);
+		start_bit = algo(chunk->bits, end_bit, start_bit,
+				nbits, data, pool);
 		if (start_bit >= end_bit)
 			continue;
 		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -310,7 +329,7 @@ retry:
 	rcu_read_unlock();
 	return addr;
 }
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_alloc_algo);

 /**
  * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
@@ -501,14 +520,41 @@ EXPORT_SYMBOL(gen_pool_set_algo);
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
  */
 unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
-		unsigned long start, unsigned int nr, void *data)
+		unsigned long start, unsigned int nr, void *data,
+		struct gen_pool *pool)
 {
 	return bitmap_find_next_zero_area(map, size, start, nr, 0);
 }
 EXPORT_SYMBOL(gen_pool_first_fit);

+/**
+ * gen_pool_first_fit_align - find the first available region
+ * of memory matching the size requirement (alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: data for alignment
+ * @pool: pool to get order from
+ */
+unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
+		unsigned long start, unsigned int nr, void *data,
+		struct gen_pool *pool)
+{
+	struct genpool_data_align *alignment;
+	unsigned long align_mask;
+	int order;
+
+	alignment = data;
+	order = pool->min_alloc_order;
+	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
+	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+}
+EXPORT_SYMBOL(gen_pool_first_fit_align);
+
 /**
  * gen_pool_first_fit_order_align - find the first available region
  * of memory matching the size requirement. The region will be aligned
@@ -518,10 +564,11 @@ EXPORT_SYMBOL(gen_pool_first_fit);
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
  */
 unsigned long gen_pool_first_fit_order_align(unsigned long *map,
 		unsigned long size, unsigned long start,
-		unsigned int nr, void *data)
+		unsigned int nr, void *data, struct gen_pool *pool)
 {
 	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

@@ -537,12 +584,14 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align);
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
  *
  * Iterate over the bitmap to find the smallest free region
  * which we can allocate the memory.
  */
 unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
-		unsigned long start, unsigned int nr, void *data)
+		unsigned long start, unsigned int nr, void *data,
+		struct gen_pool *pool)
 {
 	unsigned long start_bit = size;
 	unsigned long len = size + 1;
--
cgit v1.2.3

From b26981c8f743d3cb64a6907eb1f5c6c4ba6ca672 Mon Sep 17 00:00:00 2001
From: Zhao Qiang
Date: Mon, 30 Nov 2015 10:48:53 +0800
Subject: genalloc: support allocating a specific region

Add a new algorithm to genalloc that reserves a specific region of
memory.

Signed-off-by: Zhao Qiang
Signed-off-by: Scott Wood
---
 lib/genalloc.c | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

(limited to 'lib')

diff --git a/lib/genalloc.c b/lib/genalloc.c
index b8cf89d9e17d..5ec83cd93284 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -555,6 +555,38 @@ unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
 }
 EXPORT_SYMBOL(gen_pool_first_fit_align);

+/**
+ * gen_pool_fixed_alloc - reserve a specific region
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: data for alignment
+ * @pool: pool to get order from
+ */
+unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
+		unsigned long start, unsigned int nr, void *data,
+		struct gen_pool *pool)
+{
+	struct genpool_data_fixed *fixed_data;
+	int order;
+	unsigned long offset_bit;
+	unsigned long start_bit;
+
+	fixed_data = data;
+	order = pool->min_alloc_order;
+	offset_bit = fixed_data->offset >> order;
+	if (WARN_ON(fixed_data->offset & (1UL << order - 1)))
+		return size;
+
+	start_bit = bitmap_find_next_zero_area(map, size,
+			start + offset_bit, nr, 0);
+	if (start_bit != offset_bit)
+		start_bit = size;
+	return start_bit;
+}
+EXPORT_SYMBOL(gen_pool_fixed_alloc);
+
 /**
  * gen_pool_first_fit_order_align - find the first available region
  * of memory matching the size requirement. The region will be aligned
--
cgit v1.2.3

From 0e6e01ff694ee222acc5a9184211678473c948e3 Mon Sep 17 00:00:00 2001
From: Zhao Qiang
Date: Mon, 30 Nov 2015 10:48:54 +0800
Subject: CPM/QE: use genalloc to manage CPM/QE muram

Use genalloc to manage CPM/QE muram instead of rheap.

Signed-off-by: Zhao Qiang
Signed-off-by: Scott Wood
---
 lib/genalloc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/genalloc.c b/lib/genalloc.c
index 5ec83cd93284..0a1139644d32 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -576,7 +576,7 @@ unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
 	fixed_data = data;
 	order = pool->min_alloc_order;
 	offset_bit = fixed_data->offset >> order;
-	if (WARN_ON(fixed_data->offset & (1UL << order - 1)))
+	if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
 		return size;

 	start_bit = bitmap_find_next_zero_area(map, size,
--
cgit v1.2.3
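Taken together, the three genalloc patches above let a caller choose the placement policy per allocation. The sketch below is a hypothetical usage example, not code from this series: the pool pointer, sizes and the 0x1000 offset are invented, and it assumes the pool was created elsewhere with gen_pool_create()/gen_pool_add() using a min_alloc_order no larger than the requested alignment.

#include <linux/errno.h>
#include <linux/genalloc.h>

/* Hypothetical example: "muram_pool" and all numbers below are made up. */
static int muram_alloc_examples(struct gen_pool *muram_pool)
{
	struct genpool_data_align align_data = { .align = 64 };		/* bytes */
	struct genpool_data_fixed fixed_data = { .offset = 0x1000 };	/* bytes, must be order-aligned */
	unsigned long aligned, fixed;

	/* 256 bytes, 64-byte aligned, via the new explicit-algorithm entry point. */
	aligned = gen_pool_alloc_algo(muram_pool, 256,
				      gen_pool_first_fit_align, &align_data);
	if (!aligned)
		return -ENOMEM;

	/* 128 bytes reserved at a fixed offset inside the pool. */
	fixed = gen_pool_alloc_algo(muram_pool, 128,
				    gen_pool_fixed_alloc, &fixed_data);
	if (!fixed) {
		gen_pool_free(muram_pool, aligned, 256);
		return -ENOMEM;
	}

	gen_pool_free(muram_pool, fixed, 128);
	gen_pool_free(muram_pool, aligned, 256);
	return 0;
}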
Miller" Acked-by: Catalin Marinas Acked-by: Heiko Carstens [heiko: drop 'default y' for s390] Acked-by: Ingo Molnar Suggested-by: Arnd Bergmann Signed-off-by: Dan Williams --- lib/Kconfig.debug | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 8c15b29d5adc..289dfcbc14eb 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1853,3 +1853,25 @@ source "samples/Kconfig" source "lib/Kconfig.kgdb" +config ARCH_HAS_DEVMEM_IS_ALLOWED + bool + +config STRICT_DEVMEM + bool "Filter access to /dev/mem" + depends on MMU + depends on ARCH_HAS_DEVMEM_IS_ALLOWED + default y if TILE || PPC + ---help--- + If this option is disabled, you allow userspace (root) access to all + of memory, including kernel and userspace memory. Accidental + access to this is obviously disastrous, but specific access can + be used by people debugging the kernel. Note that with PAT support + enabled, even in this case there are restrictions on /dev/mem + use due to the cache aliasing requirements. + + If this option is switched on, the /dev/mem file only allows + userspace access to PCI space and the BIOS code and data regions. + This is sufficient for dosemu and X and all common users of + /dev/mem. + + If in doubt, say Y. -- cgit v1.2.3 From 90a545e981267e917b9d698ce07affd69787db87 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 23 Nov 2015 15:49:03 -0800 Subject: restrict /dev/mem to idle io memory ranges This effectively promotes IORESOURCE_BUSY to IORESOURCE_EXCLUSIVE semantics by default. If userspace really believes it is safe to access the memory region it can also perform the extra step of disabling an active driver. This protects device address ranges with read side effects and otherwise directs userspace to use the driver. Persistent memory presents a large "mistake surface" to /dev/mem as now accidental writes can corrupt a filesystem. In general if a device driver is busily using a memory region it already informs other parts of the kernel to not touch it via request_mem_region(). /dev/mem should honor the same safety restriction by default. Debugging a device driver from userspace becomes more difficult with this enabled. Any application using /dev/mem or mmap of sysfs pci resources will now need to perform the extra step of either: 1/ Disabling the driver, for example: echo > /dev/bus//drivers//unbind 2/ Rebooting with "iomem=relaxed" on the command line 3/ Recompiling with CONFIG_IO_STRICT_DEVMEM=n Traditional users of /dev/mem like dosemu are unaffected because the first 1MB of memory is not subject to the IO_STRICT_DEVMEM restriction. Legacy X configurations use /dev/mem to talk to graphics hardware, but that functionality has since moved to kernel graphics drivers. Cc: Arnd Bergmann Cc: Russell King Cc: Andrew Morton Cc: Greg Kroah-Hartman Acked-by: Kees Cook Acked-by: Ingo Molnar Signed-off-by: Dan Williams --- lib/Kconfig.debug | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 289dfcbc14eb..073496dea848 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1869,9 +1869,26 @@ config STRICT_DEVMEM enabled, even in this case there are restrictions on /dev/mem use due to the cache aliasing requirements. + If this option is switched on, and IO_STRICT_DEVMEM=n, the /dev/mem + file only allows userspace access to PCI space and the BIOS code and + data regions. 
From 90a545e981267e917b9d698ce07affd69787db87 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Mon, 23 Nov 2015 15:49:03 -0800
Subject: restrict /dev/mem to idle io memory ranges

This effectively promotes IORESOURCE_BUSY to IORESOURCE_EXCLUSIVE
semantics by default. If userspace really believes it is safe to access
the memory region it can also perform the extra step of disabling an
active driver. This protects device address ranges with read side
effects and otherwise directs userspace to use the driver.

Persistent memory presents a large "mistake surface" to /dev/mem as now
accidental writes can corrupt a filesystem.

In general if a device driver is busily using a memory region it
already informs other parts of the kernel to not touch it via
request_mem_region(). /dev/mem should honor the same safety restriction
by default.

Debugging a device driver from userspace becomes more difficult with
this enabled. Any application using /dev/mem or mmap of sysfs pci
resources will now need to perform the extra step of either:

1/ Disabling the driver, for example:

   echo <device> > /dev/bus/<bus>/drivers/<driver>/unbind

2/ Rebooting with "iomem=relaxed" on the command line

3/ Recompiling with CONFIG_IO_STRICT_DEVMEM=n

Traditional users of /dev/mem like dosemu are unaffected because the
first 1MB of memory is not subject to the IO_STRICT_DEVMEM restriction.
Legacy X configurations use /dev/mem to talk to graphics hardware, but
that functionality has since moved to kernel graphics drivers.

Cc: Arnd Bergmann
Cc: Russell King
Cc: Andrew Morton
Cc: Greg Kroah-Hartman
Acked-by: Kees Cook
Acked-by: Ingo Molnar
Signed-off-by: Dan Williams
---
 lib/Kconfig.debug | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

(limited to 'lib')

diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 289dfcbc14eb..073496dea848 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1869,9 +1869,26 @@ config STRICT_DEVMEM
 	  enabled, even in this case there are restrictions on /dev/mem
 	  use due to the cache aliasing requirements.

+	  If this option is switched on, and IO_STRICT_DEVMEM=n, the /dev/mem
+	  file only allows userspace access to PCI space and the BIOS code and
+	  data regions. This is sufficient for dosemu and X and all common
+	  users of /dev/mem.
+
+	  If in doubt, say Y.
+
+config IO_STRICT_DEVMEM
+	bool "Filter I/O access to /dev/mem"
+	depends on STRICT_DEVMEM
+	default STRICT_DEVMEM
+	---help---
+	  If this option is disabled, you allow userspace (root) access to all
+	  io-memory regardless of whether a driver is actively using that
+	  range. Accidental access to this is obviously disastrous, but
+	  specific access can be used by people debugging kernel drivers.
+
 	  If this option is switched on, the /dev/mem file only allows
-	  userspace access to PCI space and the BIOS code and data regions.
-	  This is sufficient for dosemu and X and all common users of
-	  /dev/mem.
+	  userspace access to *idle* io-memory ranges (see /proc/iomem) This
+	  may break traditional users of /dev/mem (dosemu, legacy X, etc...)
+	  if the driver using a given range cannot be disabled.

 	  If in doubt, say Y.
--
cgit v1.2.3

From ea535e418c01837d07b6c94e817540f50bfdadb0 Mon Sep 17 00:00:00 2001
From: Laura Abbott
Date: Thu, 14 Jan 2016 15:16:50 -0800
Subject: dma-debug: switch check from _text to _stext

In include/asm-generic/sections.h:

  /*
   * Usage guidelines:
   * _text, _data: architecture specific, don't use them in
   *               arch-independent code
   * [_stext, _etext]: contains .text.* sections, may also contain
   *                   .rodata.* and/or .init.* sections

_text is not guaranteed across architectures. Architectures such as ARM
may reuse parts which are not actually text and erroneously trigger a
bug. Switch to using _stext which is guaranteed to contain text
sections.

Came out of https://lkml.kernel.org/g/<567B1176.4000106@redhat.com>

Signed-off-by: Laura Abbott
Reviewed-by: Kees Cook
Cc: Russell King
Cc: Arnd Bergmann
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/dma-debug.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index d34bd24c2c84..4a1515f4b452 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1181,7 +1181,7 @@ static inline bool overlap(void *addr, unsigned long len, void *start, void *end
 static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
 {
-	if (overlap(addr, len, _text, _etext) ||
+	if (overlap(addr, len, _stext, _etext) ||
 	    overlap(addr, len, __start_rodata, __end_rodata))
 		err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
 }
--
cgit v1.2.3
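The check being adjusted above fires when a driver tries to DMA-map something that lives in the kernel image. A hypothetical example of the buggy pattern it catches (with CONFIG_DMA_API_DEBUG enabled) is sketched below; the device pointer and the data are invented.

#include <linux/types.h>
#include <linux/dma-mapping.h>

/* Hypothetical buggy pattern that check_for_illegal_area() is meant to flag. */
static const u8 fw_header[16] = { 0xde, 0xad, 0xbe, 0xef };	/* lives in .rodata */

static void bad_map(struct device *dev)
{
	dma_addr_t handle;

	/*
	 * Mapping an object from the kernel image (.rodata here, or anything
	 * inside [_stext, _etext]) makes dma-debug print the
	 * "maps memory from kernel text or rodata" error.
	 */
	handle = dma_map_single(dev, (void *)fw_header, sizeof(fw_header),
				DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return;

	dma_unmap_single(dev, handle, sizeof(fw_header), DMA_TO_DEVICE);
}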