Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                |   6
-rw-r--r--  lib/Makefile               |   2
-rw-r--r--  lib/asn1_decoder.c         |  27
-rw-r--r--  lib/bitmap.c               |  43
-rw-r--r--  lib/decompress_bunzip2.c   |   6
-rw-r--r--  lib/decompress_inflate.c   |  31
-rw-r--r--  lib/decompress_unlz4.c     |   6
-rw-r--r--  lib/decompress_unlzma.c    |   9
-rw-r--r--  lib/decompress_unlzo.c     |  13
-rw-r--r--  lib/decompress_unxz.c      |  12
-rw-r--r--  lib/devres.c               |  13
-rw-r--r--  lib/genalloc.c             | 110
-rw-r--r--  lib/kstrtox.c              |   2
-rw-r--r--  lib/nmi_backtrace.c        | 162
-rw-r--r--  lib/pci_iomap.c            |   7
-rw-r--r--  lib/raid6/neon.c           |  13
-rw-r--r--  lib/raid6/neon.uc          |  46
-rw-r--r--  lib/show_mem.c             |   4
-rw-r--r--  lib/string_helpers.c       |  20
-rw-r--r--  lib/test-kstrtox.c         |   6
-rw-r--r--  lib/test_kasan.c           |   6
-rw-r--r--  lib/zlib_deflate/deftree.c |   6
-rw-r--r--  lib/zlib_deflate/defutil.h |  16
23 files changed, 438 insertions, 128 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index a16555281d53..2e491ac15622 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -53,9 +53,6 @@ config GENERIC_IO config STMP_DEVICE bool -config PERCPU_RWSEM - bool - config ARCH_USE_CMPXCHG_LOCKREF bool @@ -528,4 +525,7 @@ config ARCH_HAS_SG_CHAIN config ARCH_HAS_PMEM_API bool +config ARCH_HAS_MMIO_FLUSH + bool + endmenu diff --git a/lib/Makefile b/lib/Makefile index f01c558bf80d..13a7c6ae3fec 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ sha1.o md5.o irq_regs.o argv_split.o \ proportions.o flex_proportions.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ - earlycpio.o seq_buf.o + earlycpio.o seq_buf.o nmi_backtrace.o obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o lib-$(CONFIG_MMU) += ioremap.o diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c index 1a000bb050f9..2b3f46c049d4 100644 --- a/lib/asn1_decoder.c +++ b/lib/asn1_decoder.c @@ -24,15 +24,20 @@ static const unsigned char asn1_op_lengths[ASN1_OP__NR] = { [ASN1_OP_MATCH_JUMP] = 1 + 1 + 1, [ASN1_OP_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_MATCH_ANY] = 1, + [ASN1_OP_MATCH_ANY_OR_SKIP] = 1, [ASN1_OP_MATCH_ANY_ACT] = 1 + 1, + [ASN1_OP_MATCH_ANY_ACT_OR_SKIP] = 1 + 1, [ASN1_OP_COND_MATCH_OR_SKIP] = 1 + 1, [ASN1_OP_COND_MATCH_ACT_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_COND_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_COND_MATCH_ANY] = 1, + [ASN1_OP_COND_MATCH_ANY_OR_SKIP] = 1, [ASN1_OP_COND_MATCH_ANY_ACT] = 1 + 1, + [ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP] = 1 + 1, [ASN1_OP_COND_FAIL] = 1, [ASN1_OP_COMPLETE] = 1, [ASN1_OP_ACT] = 1 + 1, + [ASN1_OP_MAYBE_ACT] = 1 + 1, [ASN1_OP_RETURN] = 1, [ASN1_OP_END_SEQ] = 1, [ASN1_OP_END_SEQ_OF] = 1 + 1, @@ -177,6 +182,7 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder, unsigned char flags = 0; #define FLAG_INDEFINITE_LENGTH 0x01 #define FLAG_MATCHED 0x02 +#define FLAG_LAST_MATCHED 0x04 /* Last tag matched */ #define FLAG_CONS 0x20 /* Corresponds to CONS bit in the opcode tag * - ie. whether or not we are going to parse * a compound type. 
@@ -208,9 +214,9 @@ next_op: unsigned char tmp; /* Skip conditional matches if possible */ - if ((op & ASN1_OP_MATCH__COND && - flags & FLAG_MATCHED) || - dp == datalen) { + if ((op & ASN1_OP_MATCH__COND && flags & FLAG_MATCHED) || + (op & ASN1_OP_MATCH__SKIP && dp == datalen)) { + flags &= ~FLAG_LAST_MATCHED; pc += asn1_op_lengths[op]; goto next_op; } @@ -302,7 +308,9 @@ next_op: /* Decide how to handle the operation */ switch (op) { case ASN1_OP_MATCH_ANY_ACT: + case ASN1_OP_MATCH_ANY_ACT_OR_SKIP: case ASN1_OP_COND_MATCH_ANY_ACT: + case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP: ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len); if (ret < 0) return ret; @@ -319,8 +327,10 @@ next_op: case ASN1_OP_MATCH: case ASN1_OP_MATCH_OR_SKIP: case ASN1_OP_MATCH_ANY: + case ASN1_OP_MATCH_ANY_OR_SKIP: case ASN1_OP_COND_MATCH_OR_SKIP: case ASN1_OP_COND_MATCH_ANY: + case ASN1_OP_COND_MATCH_ANY_OR_SKIP: skip_data: if (!(flags & FLAG_CONS)) { if (flags & FLAG_INDEFINITE_LENGTH) { @@ -422,8 +432,15 @@ next_op: pc += asn1_op_lengths[op]; goto next_op; + case ASN1_OP_MAYBE_ACT: + if (!(flags & FLAG_LAST_MATCHED)) { + pc += asn1_op_lengths[op]; + goto next_op; + } case ASN1_OP_ACT: ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len); + if (ret < 0) + return ret; pc += asn1_op_lengths[op]; goto next_op; @@ -431,6 +448,7 @@ next_op: if (unlikely(jsp <= 0)) goto jump_stack_underflow; pc = jump_stack[--jsp]; + flags |= FLAG_MATCHED | FLAG_LAST_MATCHED; goto next_op; default: @@ -438,7 +456,8 @@ next_op: } /* Shouldn't reach here */ - pr_err("ASN.1 decoder error: Found reserved opcode (%u)\n", op); + pr_err("ASN.1 decoder error: Found reserved opcode (%u) pc=%zu\n", + op, pc); return -EBADMSG; data_overrun_error: diff --git a/lib/bitmap.c b/lib/bitmap.c index a578a0189199..814814397cce 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -367,7 +367,8 @@ int __bitmap_parse(const char *buf, unsigned int buflen, nchunks = nbits = totaldigits = c = 0; do { - chunk = ndigits = 0; + chunk = 0; + ndigits = totaldigits; /* Get the next chunk of the bitmap */ while (buflen) { @@ -406,9 +407,9 @@ int __bitmap_parse(const char *buf, unsigned int buflen, return -EOVERFLOW; chunk = (chunk << 4) | hex_to_bin(c); - ndigits++; totaldigits++; + totaldigits++; } - if (ndigits == 0) + if (ndigits == totaldigits) return -EINVAL; if (nchunks == 0 && chunk == 0) continue; @@ -505,7 +506,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, int nmaskbits) { unsigned a, b; - int c, old_c, totaldigits; + int c, old_c, totaldigits, ndigits; const char __user __force *ubuf = (const char __user __force *)buf; int at_start, in_range; @@ -515,6 +516,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, at_start = 1; in_range = 0; a = b = 0; + ndigits = totaldigits; /* Get the next cpu# or a range of cpu#'s */ while (buflen) { @@ -528,23 +530,27 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, if (isspace(c)) continue; - /* - * If the last character was a space and the current - * character isn't '\0', we've got embedded whitespace. - * This is a no-no, so throw an error. - */ - if (totaldigits && c && isspace(old_c)) - return -EINVAL; - /* A '\0' or a ',' signal the end of a cpu# or range */ if (c == '\0' || c == ',') break; + /* + * whitespaces between digits are not allowed, + * but it's ok if whitespaces are on head or tail. + * when old_c is whilespace, + * if totaldigits == ndigits, whitespace is on head. + * if whitespace is on tail, it should not run here. 
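A minimal usage sketch of the list-format parser whose whitespace rules the bitmap.c hunk in progress here adjusts; the mask size and input strings are illustrative, not taken from the patch:

#include <linux/bitmap.h>

static int example_parse_cpus(unsigned long *mask, unsigned int nbits)
{
	/* "0,3-5" sets bits 0, 3, 4 and 5; with the change above,
	 * " 0, 3-5 " (leading/trailing spaces) is accepted as well,
	 * while "1 0" (space between digits) and "2-" (no digit after
	 * the dash) are rejected with -EINVAL. */
	return bitmap_parselist("0,3-5", mask, nbits);
}
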
+ * as c was ',' or '\0', + * the last code line has broken the current loop. + */ + if ((totaldigits != ndigits) && isspace(old_c)) + return -EINVAL; if (c == '-') { if (at_start || in_range) return -EINVAL; b = 0; in_range = 1; + at_start = 1; continue; } @@ -557,15 +563,18 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, at_start = 0; totaldigits++; } + if (ndigits == totaldigits) + continue; + /* if no digit is after '-', it's wrong*/ + if (at_start && in_range) + return -EINVAL; if (!(a <= b)) return -EINVAL; if (b >= nmaskbits) return -ERANGE; - if (!at_start) { - while (a <= b) { - set_bit(a, maskp); - a++; - } + while (a <= b) { + set_bit(a, maskp); + a++; } } while (buflen && c == ','); return 0; diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c index 6dd0335ea61b..0234361b24b8 100644 --- a/lib/decompress_bunzip2.c +++ b/lib/decompress_bunzip2.c @@ -743,12 +743,12 @@ exit_0: } #ifdef PREBOOT -STATIC int INIT decompress(unsigned char *buf, long len, +STATIC int INIT __decompress(unsigned char *buf, long len, long (*fill)(void*, unsigned long), long (*flush)(void*, unsigned long), - unsigned char *outbuf, + unsigned char *outbuf, long olen, long *pos, - void(*error)(char *x)) + void (*error)(char *x)) { return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error); } diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c index d4c7891635ec..555c06bf20da 100644 --- a/lib/decompress_inflate.c +++ b/lib/decompress_inflate.c @@ -1,4 +1,5 @@ #ifdef STATIC +#define PREBOOT /* Pre-boot environment: included */ /* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots @@ -33,23 +34,23 @@ static long INIT nofill(void *buffer, unsigned long len) } /* Included from initramfs et al code */ -STATIC int INIT gunzip(unsigned char *buf, long len, +STATIC int INIT __gunzip(unsigned char *buf, long len, long (*fill)(void*, unsigned long), long (*flush)(void*, unsigned long), - unsigned char *out_buf, + unsigned char *out_buf, long out_len, long *pos, void(*error)(char *x)) { u8 *zbuf; struct z_stream_s *strm; int rc; - size_t out_len; rc = -1; if (flush) { out_len = 0x8000; /* 32 K */ out_buf = malloc(out_len); } else { - out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */ + if (!out_len) + out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */ } if (!out_buf) { error("Out of memory while allocating output buffer"); @@ -181,4 +182,24 @@ gunzip_nomem1: return rc; /* returns Z_OK (0) if successful */ } -#define decompress gunzip +#ifndef PREBOOT +STATIC int INIT gunzip(unsigned char *buf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, + long *pos, + void (*error)(char *x)) +{ + return __gunzip(buf, len, fill, flush, out_buf, 0, pos, error); +} +#else +STATIC int INIT __decompress(unsigned char *buf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, long out_len, + long *pos, + void (*error)(char *x)) +{ + return __gunzip(buf, len, fill, flush, out_buf, out_len, pos, error); +} +#endif diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c index 40f66ebe57b7..036fc882cd72 100644 --- a/lib/decompress_unlz4.c +++ b/lib/decompress_unlz4.c @@ -196,12 +196,12 @@ exit_0: } #ifdef PREBOOT -STATIC int INIT decompress(unsigned char *buf, long in_len, +STATIC int INIT __decompress(unsigned char *buf, long in_len, long (*fill)(void*, unsigned long), long (*flush)(void*, unsigned long), - unsigned char *output, + 
unsigned char *output, long out_len, long *posp, - void(*error)(char *x) + void (*error)(char *x) ) { return unlz4(buf, in_len - 4, fill, flush, output, posp, error); diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c index 0be83af62b88..ed7a1fd819f2 100644 --- a/lib/decompress_unlzma.c +++ b/lib/decompress_unlzma.c @@ -620,7 +620,7 @@ STATIC inline int INIT unlzma(unsigned char *buf, long in_len, num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)); p = (uint16_t *) large_malloc(num_probs * sizeof(*p)); - if (p == 0) + if (p == NULL) goto exit_2; num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp)); for (i = 0; i < num_probs; i++) @@ -667,13 +667,12 @@ exit_0: } #ifdef PREBOOT -STATIC int INIT decompress(unsigned char *buf, long in_len, +STATIC int INIT __decompress(unsigned char *buf, long in_len, long (*fill)(void*, unsigned long), long (*flush)(void*, unsigned long), - unsigned char *output, + unsigned char *output, long out_len, long *posp, - void(*error)(char *x) - ) + void (*error)(char *x)) { return unlzma(buf, in_len - 4, fill, flush, output, posp, error); } diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c index b94a31bdd87d..f4c158e3a022 100644 --- a/lib/decompress_unlzo.c +++ b/lib/decompress_unlzo.c @@ -31,6 +31,7 @@ */ #ifdef STATIC +#define PREBOOT #include "lzo/lzo1x_decompress_safe.c" #else #include <linux/decompress/unlzo.h> @@ -287,4 +288,14 @@ exit: return ret; } -#define decompress unlzo +#ifdef PREBOOT +STATIC int INIT __decompress(unsigned char *buf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, long olen, + long *pos, + void (*error)(char *x)) +{ + return unlzo(buf, len, fill, flush, out_buf, pos, error); +} +#endif diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c index b07a78340e9d..25d59a95bd66 100644 --- a/lib/decompress_unxz.c +++ b/lib/decompress_unxz.c @@ -394,4 +394,14 @@ error_alloc_state: * This macro is used by architecture-specific files to decompress * the kernel image. */ -#define decompress unxz +#ifdef XZ_PREBOOT +STATIC int INIT __decompress(unsigned char *buf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *out_buf, long olen, + long *pos, + void (*error)(char *x)) +{ + return unxz(buf, len, fill, flush, out_buf, pos, error); +} +#endif diff --git a/lib/devres.c b/lib/devres.c index fbe2aac522e6..f13a2468ff39 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -119,10 +119,9 @@ EXPORT_SYMBOL(devm_iounmap); * @dev: generic device to handle the resource for * @res: resource to be handled * - * Checks that a resource is a valid memory region, requests the memory region - * and ioremaps it either as cacheable or as non-cacheable memory depending on - * the resource's flags. All operations are managed and will be undone on - * driver detach. + * Checks that a resource is a valid memory region, requests the memory + * region and ioremaps it. All operations are managed and will be undone + * on driver detach. * * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code * on failure. 
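Stepping back to the decompressor updates earlier in this diff: every backend now provides a single __decompress() entry point that also takes the output-buffer length (some backends still ignore it). A sketch of how a pre-boot wrapper is expected to call it; the wrapper normally #includes one lib/decompress_*.c with STATIC defined, which is what provides __decompress(), and the names below are illustrative:

static void error(char *x)
{
	/* report the failure however the boot environment allows */
}

static int example_decompress_kernel(unsigned char *input, long input_len,
				     unsigned char *output, long output_len)
{
	/* NULL fill/flush: flat in-memory buffers; NULL pos: not needed.
	 * The new output-length argument lets the backend bound its writes. */
	return __decompress(input, input_len, NULL, NULL, output, output_len,
			    NULL, error);
}
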
Usage example: @@ -153,11 +152,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) return IOMEM_ERR_PTR(-EBUSY); } - if (res->flags & IORESOURCE_CACHEABLE) - dest_ptr = devm_ioremap(dev, res->start, size); - else - dest_ptr = devm_ioremap_nocache(dev, res->start, size); - + dest_ptr = devm_ioremap(dev, res->start, size); if (!dest_ptr) { dev_err(dev, "ioremap failed for resource %pR\n", res); devm_release_mem_region(dev, res->start, size); diff --git a/lib/genalloc.c b/lib/genalloc.c index daf0afb6d979..116a166b096f 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -160,6 +160,7 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid) pool->min_alloc_order = min_alloc_order; pool->algo = gen_pool_first_fit; pool->data = NULL; + pool->name = NULL; } return pool; } @@ -252,8 +253,8 @@ void gen_pool_destroy(struct gen_pool *pool) kfree(chunk); } + kfree_const(pool->name); kfree(pool); - return; } EXPORT_SYMBOL(gen_pool_destroy); @@ -570,53 +571,88 @@ static void devm_gen_pool_release(struct device *dev, void *res) gen_pool_destroy(*(struct gen_pool **)res); } +static int devm_gen_pool_match(struct device *dev, void *res, void *data) +{ + struct gen_pool **p = res; + + /* NULL data matches only a pool without an assigned name */ + if (!data && !(*p)->name) + return 1; + + if (!data || !(*p)->name) + return 0; + + return !strcmp((*p)->name, data); +} + +/** + * gen_pool_get - Obtain the gen_pool (if any) for a device + * @dev: device to retrieve the gen_pool from + * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device + * + * Returns the gen_pool for the device if one is present, or NULL. + */ +struct gen_pool *gen_pool_get(struct device *dev, const char *name) +{ + struct gen_pool **p; + + p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match, + (void *)name); + if (!p) + return NULL; + return *p; +} +EXPORT_SYMBOL_GPL(gen_pool_get); + /** * devm_gen_pool_create - managed gen_pool_create * @dev: device that provides the gen_pool * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents - * @nid: node id of the node the pool structure should be allocated on, or -1 + * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes + * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device * * Create a new special memory pool that can be used to manage special purpose * memory not managed by the regular kmalloc/kfree interface. The pool will be * automatically destroyed by the device management code. 
*/ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, - int nid) + int nid, const char *name) { struct gen_pool **ptr, *pool; + const char *pool_name = NULL; + + /* Check that genpool to be created is uniquely addressed on device */ + if (gen_pool_get(dev, name)) + return ERR_PTR(-EINVAL); + + if (name) { + pool_name = kstrdup_const(name, GFP_KERNEL); + if (!pool_name) + return ERR_PTR(-ENOMEM); + } ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) - return NULL; + goto free_pool_name; pool = gen_pool_create(min_alloc_order, nid); - if (pool) { - *ptr = pool; - devres_add(dev, ptr); - } else { - devres_free(ptr); - } + if (!pool) + goto free_devres; + + *ptr = pool; + pool->name = pool_name; + devres_add(dev, ptr); return pool; -} -EXPORT_SYMBOL(devm_gen_pool_create); -/** - * gen_pool_get - Obtain the gen_pool (if any) for a device - * @dev: device to retrieve the gen_pool from - * - * Returns the gen_pool for the device if one is present, or NULL. - */ -struct gen_pool *gen_pool_get(struct device *dev) -{ - struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL, - NULL); +free_devres: + devres_free(ptr); +free_pool_name: + kfree_const(pool_name); - if (!p) - return NULL; - return *p; + return ERR_PTR(-ENOMEM); } -EXPORT_SYMBOL_GPL(gen_pool_get); +EXPORT_SYMBOL(devm_gen_pool_create); #ifdef CONFIG_OF /** @@ -633,16 +669,30 @@ struct gen_pool *of_gen_pool_get(struct device_node *np, const char *propname, int index) { struct platform_device *pdev; - struct device_node *np_pool; + struct device_node *np_pool, *parent; + const char *name = NULL; + struct gen_pool *pool = NULL; np_pool = of_parse_phandle(np, propname, index); if (!np_pool) return NULL; + pdev = of_find_device_by_node(np_pool); + if (!pdev) { + /* Check if named gen_pool is created by parent node device */ + parent = of_get_parent(np_pool); + pdev = of_find_device_by_node(parent); + of_node_put(parent); + + of_property_read_string(np_pool, "label", &name); + if (!name) + name = np_pool->name; + } + if (pdev) + pool = gen_pool_get(&pdev->dev, name); of_node_put(np_pool); - if (!pdev) - return NULL; - return gen_pool_get(&pdev->dev); + + return pool; } EXPORT_SYMBOL_GPL(of_gen_pool_get); #endif /* CONFIG_OF */ diff --git a/lib/kstrtox.c b/lib/kstrtox.c index ec8da78df9be..94be244e8441 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c @@ -152,7 +152,7 @@ int kstrtoll(const char *s, unsigned int base, long long *res) rv = _kstrtoull(s + 1, base, &tmp); if (rv < 0) return rv; - if ((long long)(-tmp) >= 0) + if ((long long)-tmp > 0) return -ERANGE; *res = -tmp; } else { diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c new file mode 100644 index 000000000000..88d3d32e5923 --- /dev/null +++ b/lib/nmi_backtrace.c @@ -0,0 +1,162 @@ +/* + * NMI backtrace support + * + * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King, + * with the following header: + * + * HW NMI watchdog support + * + * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. + * + * Arch specific calls to support NMI watchdog + * + * Bits copied from original nmi.c file + */ +#include <linux/cpumask.h> +#include <linux/delay.h> +#include <linux/kprobes.h> +#include <linux/nmi.h> +#include <linux/seq_buf.h> + +#ifdef arch_trigger_all_cpu_backtrace +/* For reliability, we're prepared to waste bits here. 
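An aside on the genalloc changes just above: devm_gen_pool_create() now takes a name and gen_pool_get() looks a pool up by it, so one device can own several managed pools. A minimal sketch under assumed driver code; the pool name, allocation order and helper names are illustrative:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/numa.h>

static int example_setup_sram(struct device *dev, phys_addr_t phys,
			      void *virt, size_t size)
{
	struct gen_pool *pool;

	/* order 5: 32-byte allocation granularity; the name makes this
	 * pool addressable among others owned by the same device */
	pool = devm_gen_pool_create(dev, 5, NUMA_NO_NODE, "example-sram");
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	return gen_pool_add_virt(pool, (unsigned long)virt, phys, size,
				 NUMA_NO_NODE);
}

static struct gen_pool *example_find_sram(struct device *dev)
{
	/* a NULL name would match only a pool created without a name */
	return gen_pool_get(dev, "example-sram");
}
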
*/ +static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly; +static cpumask_t printtrace_mask; + +#define NMI_BUF_SIZE 4096 + +struct nmi_seq_buf { + unsigned char buffer[NMI_BUF_SIZE]; + struct seq_buf seq; +}; + +/* Safe printing in NMI context */ +static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq); + +/* "in progress" flag of arch_trigger_all_cpu_backtrace */ +static unsigned long backtrace_flag; + +static void print_seq_line(struct nmi_seq_buf *s, int start, int end) +{ + const char *buf = s->buffer + start; + + printk("%.*s", (end - start) + 1, buf); +} + +void nmi_trigger_all_cpu_backtrace(bool include_self, + void (*raise)(cpumask_t *mask)) +{ + struct nmi_seq_buf *s; + int i, cpu, this_cpu = get_cpu(); + + if (test_and_set_bit(0, &backtrace_flag)) { + /* + * If there is already a trigger_all_cpu_backtrace() in progress + * (backtrace_flag == 1), don't output double cpu dump infos. + */ + put_cpu(); + return; + } + + cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask); + if (!include_self) + cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask)); + + cpumask_copy(&printtrace_mask, to_cpumask(backtrace_mask)); + + /* + * Set up per_cpu seq_buf buffers that the NMIs running on the other + * CPUs will write to. + */ + for_each_cpu(cpu, to_cpumask(backtrace_mask)) { + s = &per_cpu(nmi_print_seq, cpu); + seq_buf_init(&s->seq, s->buffer, NMI_BUF_SIZE); + } + + if (!cpumask_empty(to_cpumask(backtrace_mask))) { + pr_info("Sending NMI to %s CPUs:\n", + (include_self ? "all" : "other")); + raise(to_cpumask(backtrace_mask)); + } + + /* Wait for up to 10 seconds for all CPUs to do the backtrace */ + for (i = 0; i < 10 * 1000; i++) { + if (cpumask_empty(to_cpumask(backtrace_mask))) + break; + mdelay(1); + touch_softlockup_watchdog(); + } + + /* + * Now that all the NMIs have triggered, we can dump out their + * back traces safely to the console. + */ + for_each_cpu(cpu, &printtrace_mask) { + int len, last_i = 0; + + s = &per_cpu(nmi_print_seq, cpu); + len = seq_buf_used(&s->seq); + if (!len) + continue; + + /* Print line by line. */ + for (i = 0; i < len; i++) { + if (s->buffer[i] == '\n') { + print_seq_line(s, last_i, i); + last_i = i + 1; + } + } + /* Check if there was a partial line. */ + if (last_i < len) { + print_seq_line(s, last_i, len - 1); + pr_cont("\n"); + } + } + + clear_bit(0, &backtrace_flag); + smp_mb__after_atomic(); + put_cpu(); +} + +/* + * It is not safe to call printk() directly from NMI handlers. + * It may be fine if the NMI detected a lock up and we have no choice + * but to do so, but doing a NMI on all other CPUs to get a back trace + * can be done with a sysrq-l. We don't want that to lock up, which + * can happen if the NMI interrupts a printk in progress. + * + * Instead, we redirect the vprintk() to this nmi_vprintk() that writes + * the content into a per cpu seq_buf buffer. Then when the NMIs are + * all done, we can safely dump the contents of the seq_buf to a printk() + * from a non NMI context. 
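To make the hand-off concrete: an architecture that defines arch_trigger_all_cpu_backtrace() is expected to pass its NMI/IPI-raising routine to the helper above and to call nmi_cpu_backtrace() from the handler on each receiving CPU. A sketch with illustrative arch-side names:

#include <linux/cpumask.h>
#include <linux/nmi.h>
#include <linux/ptrace.h>

static void example_raise_nmi(cpumask_t *mask)
{
	/* arch-specific: send an NMI (or suitable IPI) to each CPU in @mask */
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
	nmi_trigger_all_cpu_backtrace(include_self, example_raise_nmi);
}

static bool example_backtrace_nmi_handler(struct pt_regs *regs)
{
	/* true when this CPU was in the requested mask and its backtrace
	 * has been written into the per-cpu seq_buf for later printing */
	return nmi_cpu_backtrace(regs);
}
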
+ */ +static int nmi_vprintk(const char *fmt, va_list args) +{ + struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq); + unsigned int len = seq_buf_used(&s->seq); + + seq_buf_vprintf(&s->seq, fmt, args); + return seq_buf_used(&s->seq) - len; +} + +bool nmi_cpu_backtrace(struct pt_regs *regs) +{ + int cpu = smp_processor_id(); + + if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { + printk_func_t printk_func_save = this_cpu_read(printk_func); + + /* Replace printk to write into the NMI seq */ + this_cpu_write(printk_func, nmi_vprintk); + pr_warn("NMI backtrace for cpu %d\n", cpu); + show_regs(regs); + this_cpu_write(printk_func, printk_func_save); + + cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); + return true; + } + + return false; +} +NOKPROBE_SYMBOL(nmi_cpu_backtrace); +#endif diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c index 5f5d24d1d53f..c10fba461454 100644 --- a/lib/pci_iomap.c +++ b/lib/pci_iomap.c @@ -41,11 +41,8 @@ void __iomem *pci_iomap_range(struct pci_dev *dev, len = maxlen; if (flags & IORESOURCE_IO) return __pci_ioport_map(dev, start, len); - if (flags & IORESOURCE_MEM) { - if (flags & IORESOURCE_CACHEABLE) - return ioremap(start, len); - return ioremap_nocache(start, len); - } + if (flags & IORESOURCE_MEM) + return ioremap(start, len); /* What? */ return NULL; } diff --git a/lib/raid6/neon.c b/lib/raid6/neon.c index d9ad6ee284f4..7076ef1ba3dd 100644 --- a/lib/raid6/neon.c +++ b/lib/raid6/neon.c @@ -40,9 +40,20 @@ (unsigned long)bytes, ptrs); \ kernel_neon_end(); \ } \ + static void raid6_neon ## _n ## _xor_syndrome(int disks, \ + int start, int stop, \ + size_t bytes, void **ptrs) \ + { \ + void raid6_neon ## _n ## _xor_syndrome_real(int, \ + int, int, unsigned long, void**); \ + kernel_neon_begin(); \ + raid6_neon ## _n ## _xor_syndrome_real(disks, \ + start, stop, (unsigned long)bytes, ptrs); \ + kernel_neon_end(); \ + } \ struct raid6_calls const raid6_neonx ## _n = { \ raid6_neon ## _n ## _gen_syndrome, \ - NULL, /* XOR not yet implemented */ \ + raid6_neon ## _n ## _xor_syndrome, \ raid6_have_neon, \ "neonx" #_n, \ 0 \ diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc index 1b9ed793342d..4fa51b761dd0 100644 --- a/lib/raid6/neon.uc +++ b/lib/raid6/neon.uc @@ -3,6 +3,7 @@ * neon.uc - RAID-6 syndrome calculation using ARM NEON instructions * * Copyright (C) 2012 Rob Herring + * Copyright (C) 2015 Linaro Ltd. <ard.biesheuvel@linaro.org> * * Based on altivec.uc: * Copyright 2002-2004 H. 
Peter Anvin - All Rights Reserved @@ -78,3 +79,48 @@ void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs) vst1q_u8(&q[d+NSIZE*$$], wq$$); } } + +void raid6_neon$#_xor_syndrome_real(int disks, int start, int stop, + unsigned long bytes, void **ptrs) +{ + uint8_t **dptr = (uint8_t **)ptrs; + uint8_t *p, *q; + int d, z, z0; + + register unative_t wd$$, wq$$, wp$$, w1$$, w2$$; + const unative_t x1d = NBYTES(0x1d); + + z0 = stop; /* P/Q right side optimization */ + p = dptr[disks-2]; /* XOR parity */ + q = dptr[disks-1]; /* RS syndrome */ + + for ( d = 0 ; d < bytes ; d += NSIZE*$# ) { + wq$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]); + wp$$ = veorq_u8(vld1q_u8(&p[d+$$*NSIZE]), wq$$); + + /* P/Q data pages */ + for ( z = z0-1 ; z >= start ; z-- ) { + wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]); + wp$$ = veorq_u8(wp$$, wd$$); + w2$$ = MASK(wq$$); + w1$$ = SHLBYTE(wq$$); + + w2$$ = vandq_u8(w2$$, x1d); + w1$$ = veorq_u8(w1$$, w2$$); + wq$$ = veorq_u8(w1$$, wd$$); + } + /* P/Q left side optimization */ + for ( z = start-1 ; z >= 0 ; z-- ) { + w2$$ = MASK(wq$$); + w1$$ = SHLBYTE(wq$$); + + w2$$ = vandq_u8(w2$$, x1d); + wq$$ = veorq_u8(w1$$, w2$$); + } + w1$$ = vld1q_u8(&q[d+NSIZE*$$]); + wq$$ = veorq_u8(wq$$, w1$$); + + vst1q_u8(&p[d+NSIZE*$$], wp$$); + vst1q_u8(&q[d+NSIZE*$$], wq$$); + } +} diff --git a/lib/show_mem.c b/lib/show_mem.c index adc98e1825ba..1feed6a2b12a 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c @@ -38,11 +38,9 @@ void show_mem(unsigned int filter) printk("%lu pages RAM\n", total); printk("%lu pages HighMem/MovableOnly\n", highmem); + printk("%lu pages reserved\n", reserved); #ifdef CONFIG_CMA - printk("%lu pages reserved\n", (reserved - totalcma_pages)); printk("%lu pages cma reserved\n", totalcma_pages); -#else - printk("%lu pages reserved\n", reserved); #endif #ifdef CONFIG_QUICKLIST printk("%lu pages in pagetable cache\n", diff --git a/lib/string_helpers.c b/lib/string_helpers.c index c98ae818eb4e..54036ce2e2dd 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c @@ -410,7 +410,7 @@ static bool escape_hex(unsigned char c, char **dst, char *end) * @dst: destination buffer (escaped) * @osz: destination buffer size * @flags: combination of the flags (bitwise OR): - * %ESCAPE_SPACE: + * %ESCAPE_SPACE: (special white space, not space itself) * '\f' - form feed * '\n' - new line * '\r' - carriage return @@ -432,16 +432,18 @@ static bool escape_hex(unsigned char c, char **dst, char *end) * all previous together * %ESCAPE_HEX: * '\xHH' - byte with hexadecimal value HH (2 digits) - * @esc: NULL-terminated string of characters any of which, if found in - * the source, has to be escaped + * @only: NULL-terminated string containing characters used to limit + * the selected escape class. If characters are included in @only + * that would not normally be escaped by the classes selected + * in @flags, they will be copied to @dst unescaped. * * Description: * The process of escaping byte buffer includes several parts. They are applied * in the following sequence. * 1. The character is matched to the printable class, if asked, and in * case of match it passes through to the output. - * 2. The character is not matched to the one from @esc string and thus - * must go as is to the output. + * 2. The character is not matched to the one from @only string and thus + * must go as-is to the output. * 3. The character is checked if it falls into the class given by @flags. * %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any * character. 
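For the string_helpers change in progress here, the renamed @only argument narrows which characters the selected escape classes may act on. A small sketch assuming a caller that only wants '\n' and '\t' escaped:

#include <linux/string_helpers.h>

static int example_escape(const char *src, size_t len, char *dst, size_t dstsz)
{
	/* ESCAPE_SPACE covers \f \n \r \t \v, but with @only = "\n\t"
	 * anything not in that string (e.g. '\r') is copied unescaped */
	return string_escape_mem(src, len, dst, dstsz, ESCAPE_SPACE, "\n\t");
}
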
Note that they actually can't go together, otherwise @@ -458,11 +460,11 @@ static bool escape_hex(unsigned char c, char **dst, char *end) * dst for a '\0' terminator if and only if ret < osz. */ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, - unsigned int flags, const char *esc) + unsigned int flags, const char *only) { char *p = dst; char *end = p + osz; - bool is_dict = esc && *esc; + bool is_dict = only && *only; while (isz--) { unsigned char c = *src++; @@ -471,7 +473,7 @@ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, * Apply rules in the following sequence: * - the character is printable, when @flags has * %ESCAPE_NP bit set - * - the @esc string is supplied and does not contain a + * - the @only string is supplied and does not contain a * character under question * - the character doesn't fall into a class of symbols * defined by given @flags @@ -479,7 +481,7 @@ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, * output buffer. */ if ((flags & ESCAPE_NP && isprint(c)) || - (is_dict && !strchr(esc, c))) { + (is_dict && !strchr(only, c))) { /* do nothing */ } else { if (flags & ESCAPE_SPACE && escape_space(c, &p, end)) diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c index 4137bca5f8e8..f355f67169b6 100644 --- a/lib/test-kstrtox.c +++ b/lib/test-kstrtox.c @@ -260,6 +260,7 @@ static void __init test_kstrtoll_ok(void) {"4294967297", 10, 4294967297LL}, {"9223372036854775807", 10, 9223372036854775807LL}, + {"-0", 10, 0LL}, {"-1", 10, -1LL}, {"-2", 10, -2LL}, {"-9223372036854775808", 10, LLONG_MIN}, @@ -277,11 +278,6 @@ static void __init test_kstrtoll_fail(void) {"-9223372036854775809", 10}, {"-18446744073709551614", 10}, {"-18446744073709551615", 10}, - /* negative zero isn't an integer in Linux */ - {"-0", 0}, - {"-0", 8}, - {"-0", 10}, - {"-0", 16}, /* sign is first character if any */ {"-+1", 0}, {"-+1", 8}, diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 098c08eddfab..c1efb1b61017 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -65,7 +65,7 @@ static noinline void __init kmalloc_node_oob_right(void) kfree(ptr); } -static noinline void __init kmalloc_large_oob_rigth(void) +static noinline void __init kmalloc_large_oob_right(void) { char *ptr; size_t size = KMALLOC_MAX_CACHE_SIZE + 10; @@ -114,7 +114,7 @@ static noinline void __init kmalloc_oob_krealloc_less(void) kfree(ptr1); return; } - ptr2[size1] = 'x'; + ptr2[size2] = 'x'; kfree(ptr2); } @@ -259,7 +259,7 @@ static int __init kmalloc_tests_init(void) kmalloc_oob_right(); kmalloc_oob_left(); kmalloc_node_oob_right(); - kmalloc_large_oob_rigth(); + kmalloc_large_oob_right(); kmalloc_oob_krealloc_more(); kmalloc_oob_krealloc_less(); kmalloc_oob_16(); diff --git a/lib/zlib_deflate/deftree.c b/lib/zlib_deflate/deftree.c index ddf348299f24..9b1756b12743 100644 --- a/lib/zlib_deflate/deftree.c +++ b/lib/zlib_deflate/deftree.c @@ -35,6 +35,7 @@ /* #include "deflate.h" */ #include <linux/zutil.h> +#include <linux/bitrev.h> #include "defutil.h" #ifdef DEBUG_ZLIB @@ -146,7 +147,6 @@ static void send_all_trees (deflate_state *s, int lcodes, int dcodes, static void compress_block (deflate_state *s, ct_data *ltree, ct_data *dtree); static void set_data_type (deflate_state *s); -static unsigned bi_reverse (unsigned value, int length); static void bi_windup (deflate_state *s); static void bi_flush (deflate_state *s); static void copy_block (deflate_state *s, char *buf, unsigned len, @@ -284,7 +284,7 @@ static void tr_static_init(void) /* The static 
distance tree is trivial: */ for (n = 0; n < D_CODES; n++) { static_dtree[n].Len = 5; - static_dtree[n].Code = bi_reverse((unsigned)n, 5); + static_dtree[n].Code = bitrev32((u32)n) >> (32 - 5); } static_init_done = 1; } @@ -520,7 +520,7 @@ static void gen_codes( int len = tree[n].Len; if (len == 0) continue; /* Now reverse the bits */ - tree[n].Code = bi_reverse(next_code[len]++, len); + tree[n].Code = bitrev32((u32)(next_code[len]++)) >> (32 - len); Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ", n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1)); diff --git a/lib/zlib_deflate/defutil.h b/lib/zlib_deflate/defutil.h index b640b6402e99..a8c370897c9f 100644 --- a/lib/zlib_deflate/defutil.h +++ b/lib/zlib_deflate/defutil.h @@ -293,22 +293,6 @@ void zlib_tr_stored_type_only (deflate_state *); } /* =========================================================================== - * Reverse the first len bits of a code, using straightforward code (a faster - * method would use a table) - * IN assertion: 1 <= len <= 15 - */ -static inline unsigned bi_reverse(unsigned code, /* the value to invert */ - int len) /* its bit length */ -{ - register unsigned res = 0; - do { - res |= code & 1; - code >>= 1, res <<= 1; - } while (--len > 0); - return res >> 1; -} - -/* =========================================================================== * Flush the bit buffer, keeping at most 7 bits in it. */ static inline void bi_flush(deflate_state *s) |
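The zlib_deflate change above drops the private bi_reverse() helper in favour of the kernel-wide bitrev32(). A short sketch of the idiom, with an illustrative wrapper name:

#include <linux/bitrev.h>

static inline unsigned int reverse_code(unsigned int code, int len)
{
	/* bitrev32() reverses all 32 bits; shifting right by (32 - len)
	 * keeps just the reversed low 'len' bits, which is what the
	 * removed bi_reverse(code, len) used to compute */
	return bitrev32(code) >> (32 - len);
}

For the Huffman code lengths used here (1 <= len <= 15) the two forms agree bit for bit.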