Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig               |  13
-rw-r--r--  lib/Kconfig.debug         |  60
-rw-r--r--  lib/Makefile              |   4
-rw-r--r--  lib/asn1_decoder.c        |   2
-rw-r--r--  lib/atomic64_test.c       |  13
-rw-r--r--  lib/audit.c               |  15
-rw-r--r--  lib/btree.c               |   1
-rw-r--r--  lib/bug.c                 |  21
-rw-r--r--  lib/clz_ctz.c             |   7
-rw-r--r--  lib/compat_audit.c        |  50
-rw-r--r--  lib/crc32.c               |   4
-rw-r--r--  lib/debugobjects.c        |  19
-rw-r--r--  lib/decompress.c          |   3
-rw-r--r--  lib/decompress_inflate.c  |   1
-rw-r--r--  lib/devres.c              |  26
-rw-r--r--  lib/digsig.c              |   5
-rw-r--r--  lib/dump_stack.c          |   4
-rw-r--r--  lib/fdt_empty_tree.c      |   2
-rw-r--r--  lib/idr.c                 |  74
-rw-r--r--  lib/iomap.c               |   4
-rw-r--r--  lib/kobject.c             |   2
-rw-r--r--  lib/kobject_uevent.c      |  48
-rw-r--r--  lib/libcrc32c.c           |   5
-rw-r--r--  lib/nlattr.c              |  31
-rw-r--r--  lib/percpu_counter.c      |   2
-rw-r--r--  lib/plist.c               |  56
-rw-r--r--  lib/radix-tree.c          | 396
-rw-r--r--  lib/random32.c            |  76
-rw-r--r--  lib/smp_processor_id.c    |  18
-rw-r--r--  lib/string.c              |  28
-rw-r--r--  lib/swiotlb.c             |   2
-rw-r--r--  lib/syscall.c             |   1
-rw-r--r--  lib/textsearch.c          |   9
-rw-r--r--  lib/vsprintf.c            |  62
-rw-r--r--  lib/xz/Kconfig            |  24
-rw-r--r--  lib/xz/xz_dec_lzma2.c     |   4
36 files changed, 644 insertions(+), 448 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 991c98bc4a3f..4771fb3f4da4 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -182,6 +182,15 @@ config AUDIT_GENERIC
depends on AUDIT && !AUDIT_ARCH
default y
+config AUDIT_ARCH_COMPAT_GENERIC
+ bool
+ default n
+
+config AUDIT_COMPAT_GENERIC
+ bool
+ depends on AUDIT_GENERIC && AUDIT_ARCH_COMPAT_GENERIC && COMPAT
+ default y
+
config RANDOM32_SELFTEST
bool "PRNG perform self test on init"
default n
@@ -342,9 +351,9 @@ config HAS_IOMEM
select GENERIC_IO
default y
-config HAS_IOPORT
+config HAS_IOPORT_MAP
boolean
- depends on HAS_IOMEM && !NO_IOPORT
+ depends on HAS_IOMEM && !NO_IOPORT_MAP
default y
config HAS_DMA
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a48abeac753f..ccca32264748 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -501,12 +501,21 @@ config DEBUG_VM
If unsure, say N.
+config DEBUG_VM_VMACACHE
+ bool "Debug VMA caching"
+ depends on DEBUG_VM
+ help
+ Enable this to turn on VMA caching debug information. Doing so
+ can cause significant overhead, so only enable it in non-production
+ environments.
+
+ If unsure, say N.
+
config DEBUG_VM_RB
bool "Debug VM red-black trees"
depends on DEBUG_VM
help
- Enable this to turn on more extended checks in the virtual-memory
- system that may impact performance.
+ Enable VM red-black tree debugging information and extra validations.
If unsure, say N.
@@ -576,8 +585,8 @@ config DEBUG_HIGHMEM
bool "Highmem debugging"
depends on DEBUG_KERNEL && HIGHMEM
help
- This options enables addition error checking for high memory systems.
- Disable for production systems.
+ This option enables additional error checking for high memory
+ systems. Disable for production systems.
config HAVE_DEBUG_STACKOVERFLOW
bool
@@ -824,11 +833,6 @@ config DEBUG_RT_MUTEXES
This allows rt mutex semantics violations and rt mutex related
deadlocks (lockups) to be detected and reported automatically.
-config DEBUG_PI_LIST
- bool
- default y
- depends on DEBUG_RT_MUTEXES
-
config RT_MUTEX_TESTER
bool "Built-in scriptable tester for rt-mutexes"
depends on DEBUG_KERNEL && RT_MUTEXES
@@ -980,6 +984,21 @@ config DEBUG_LOCKING_API_SELFTESTS
The following locking APIs are covered: spinlocks, rwlocks,
mutexes and rwsems.
+config LOCK_TORTURE_TEST
+ tristate "torture tests for locking"
+ depends on DEBUG_KERNEL
+ select TORTURE_TEST
+ default n
+ help
+ This option provides a kernel module that runs torture tests
+ on kernel locking primitives. The kernel module may be built
+ after the fact on the running kernel to be tested, if desired.
+
+ Say Y here if you want kernel locking-primitive torture tests
+ to be built into the kernel.
+ Say M if you want these torture tests to build as a module.
+ Say N if you are unsure.
+
endmenu # lock debugging
config TRACE_IRQFLAGS
@@ -1030,22 +1049,22 @@ config DEBUG_BUGVERBOSE
of the BUG call as well as the EIP and oops trace. This aids
debugging but costs about 70-100K of memory.
-config DEBUG_WRITECOUNT
- bool "Debug filesystem writers count"
+config DEBUG_LIST
+ bool "Debug linked list manipulation"
depends on DEBUG_KERNEL
help
- Enable this to catch wrong use of the writers count in struct
- vfsmount. This will increase the size of each file struct by
- 32 bits.
+ Enable this to turn on extended checks in the linked-list
+ walking routines.
If unsure, say N.
-config DEBUG_LIST
- bool "Debug linked list manipulation"
+config DEBUG_PI_LIST
+ bool "Debug priority linked list manipulation"
depends on DEBUG_KERNEL
help
- Enable this to turn on extended checks in the linked-list
- walking routines.
+ Enable this to turn on extended checks in the priority-ordered
+ linked-list (plist) walking routines. This checks the entire
+ list multiple times during each manipulation.
If unsure, say N.
@@ -1141,9 +1160,14 @@ config SPARSE_RCU_POINTER
Say N if you are unsure.
+config TORTURE_TEST
+ tristate
+ default n
+
config RCU_TORTURE_TEST
tristate "torture tests for RCU"
depends on DEBUG_KERNEL
+ select TORTURE_TEST
default n
help
This option provides a kernel module that runs torture tests
diff --git a/lib/Makefile b/lib/Makefile
index 48140e3ba73f..74a32dc49a93 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
+obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
@@ -147,7 +148,8 @@ obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
-libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o
+libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
+ fdt_empty_tree.o
$(foreach file, $(libfdt_files), \
$(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt))
lib-$(CONFIG_LIBFDT) += $(libfdt_files)
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 11b9b01fda6b..1a000bb050f9 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -140,7 +140,7 @@ error:
* @decoder: The decoder definition (produced by asn1_compiler)
* @context: The caller's context (to be passed to the action functions)
* @data: The encoded data
- * @datasize: The size of the encoded data
+ * @datalen: The size of the encoded data
*
* Decode BER/DER/CER encoded ASN.1 data according to a bytecode pattern
* produced by asn1_compiler. Action functions are called on marked tags to
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 00bca223d1e1..0211d30d8c39 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -8,6 +8,9 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/kernel.h>
@@ -146,18 +149,18 @@ static __init int test_atomic64(void)
BUG_ON(v.counter != r);
#ifdef CONFIG_X86
- printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n",
+ pr_info("passed for %s platform %s CX8 and %s SSE\n",
#ifdef CONFIG_X86_64
- "x86-64",
+ "x86-64",
#elif defined(CONFIG_X86_CMPXCHG64)
- "i586+",
+ "i586+",
#else
- "i386+",
+ "i386+",
#endif
boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
#else
- printk(KERN_INFO "atomic64 test passed\n");
+ pr_info("passed\n");
#endif
return 0;
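The pr_info()/pr_warn() conversions in this patch all rely on the pr_fmt() convention. A minimal sketch, not part of the patch and using a made-up module, of how that prefixing works:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must come before the includes below */

#include <linux/module.h>
#include <linux/printk.h>

static int __init demo_init(void)
{
	pr_info("test passed\n");	/* logs "demo: test passed" for a module named "demo" */
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");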
diff --git a/lib/audit.c b/lib/audit.c
index 76bbed4a20e5..1d726a22565b 100644
--- a/lib/audit.c
+++ b/lib/audit.c
@@ -30,11 +30,17 @@ static unsigned signal_class[] = {
int audit_classify_arch(int arch)
{
- return 0;
+ if (audit_is_compat(arch))
+ return 1;
+ else
+ return 0;
}
int audit_classify_syscall(int abi, unsigned syscall)
{
+ if (audit_is_compat(abi))
+ return audit_classify_compat_syscall(abi, syscall);
+
switch(syscall) {
#ifdef __NR_open
case __NR_open:
@@ -57,6 +63,13 @@ int audit_classify_syscall(int abi, unsigned syscall)
static int __init audit_classes_init(void)
{
+#ifdef CONFIG_AUDIT_COMPAT_GENERIC
+ audit_register_class(AUDIT_CLASS_WRITE_32, compat_write_class);
+ audit_register_class(AUDIT_CLASS_READ_32, compat_read_class);
+ audit_register_class(AUDIT_CLASS_DIR_WRITE_32, compat_dir_class);
+ audit_register_class(AUDIT_CLASS_CHATTR_32, compat_chattr_class);
+ audit_register_class(AUDIT_CLASS_SIGNAL_32, compat_signal_class);
+#endif
audit_register_class(AUDIT_CLASS_WRITE, write_class);
audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
diff --git a/lib/btree.c b/lib/btree.c
index f9a484676cb6..4264871ea1a0 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -198,6 +198,7 @@ EXPORT_SYMBOL_GPL(btree_init);
void btree_destroy(struct btree_head *head)
{
+ mempool_free(head->node, head->mempool);
mempool_destroy(head->mempool);
head->mempool = NULL;
}
diff --git a/lib/bug.c b/lib/bug.c
index 168603477f02..d1d7c7878900 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -37,6 +37,9 @@
Jeremy Fitzhardinge <jeremy@goop.org> 2006
*/
+
+#define pr_fmt(fmt) fmt
+
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -153,15 +156,13 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
if (warning) {
/* this is a WARN_ON rather than BUG/BUG_ON */
- printk(KERN_WARNING "------------[ cut here ]------------\n");
+ pr_warn("------------[ cut here ]------------\n");
if (file)
- printk(KERN_WARNING "WARNING: at %s:%u\n",
- file, line);
+ pr_warn("WARNING: at %s:%u\n", file, line);
else
- printk(KERN_WARNING "WARNING: at %p "
- "[verbose debug info unavailable]\n",
- (void *)bugaddr);
+ pr_warn("WARNING: at %p [verbose debug info unavailable]\n",
+ (void *)bugaddr);
print_modules();
show_regs(regs);
@@ -174,12 +175,10 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
printk(KERN_DEFAULT "------------[ cut here ]------------\n");
if (file)
- printk(KERN_CRIT "kernel BUG at %s:%u!\n",
- file, line);
+ pr_crit("kernel BUG at %s:%u!\n", file, line);
else
- printk(KERN_CRIT "Kernel BUG at %p "
- "[verbose debug info unavailable]\n",
- (void *)bugaddr);
+ pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n",
+ (void *)bugaddr);
return BUG_TRAP_TYPE_BUG;
}
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
index a8f8379eb49f..2e11e48446ab 100644
--- a/lib/clz_ctz.c
+++ b/lib/clz_ctz.c
@@ -6,6 +6,9 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
+ * The functions in this file aren't called directly, but are required by
+ * GCC builtins such as __builtin_ctz, and therefore they can't be removed
+ * despite appearing unreferenced in kernel source.
*
* __c[lt]z[sd]i2 can be overridden by linking arch-specific versions.
*/
@@ -13,18 +16,22 @@
#include <linux/export.h>
#include <linux/kernel.h>
+int __weak __ctzsi2(int val);
int __weak __ctzsi2(int val)
{
return __ffs(val);
}
EXPORT_SYMBOL(__ctzsi2);
+int __weak __clzsi2(int val);
int __weak __clzsi2(int val)
{
return 32 - fls(val);
}
EXPORT_SYMBOL(__clzsi2);
+int __weak __clzdi2(long val);
+int __weak __ctzdi2(long val);
#if BITS_PER_LONG == 32
int __weak __clzdi2(long val)
diff --git a/lib/compat_audit.c b/lib/compat_audit.c
new file mode 100644
index 000000000000..873f75b640ab
--- /dev/null
+++ b/lib/compat_audit.c
@@ -0,0 +1,50 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <asm/unistd32.h>
+
+unsigned compat_dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+unsigned compat_read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+unsigned compat_write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+unsigned compat_chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+unsigned compat_signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int audit_classify_compat_syscall(int abi, unsigned syscall)
+{
+ switch (syscall) {
+#ifdef __NR_open
+ case __NR_open:
+ return 2;
+#endif
+#ifdef __NR_openat
+ case __NR_openat:
+ return 3;
+#endif
+#ifdef __NR_socketcall
+ case __NR_socketcall:
+ return 4;
+#endif
+ case __NR_execve:
+ return 5;
+ default:
+ return 1;
+ }
+}
diff --git a/lib/crc32.c b/lib/crc32.c
index 70f00ca5ef1e..21a7b2135af6 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -33,13 +33,13 @@
#include "crc32defs.h"
#if CRC_LE_BITS > 8
-# define tole(x) ((__force u32) __constant_cpu_to_le32(x))
+# define tole(x) ((__force u32) cpu_to_le32(x))
#else
# define tole(x) (x)
#endif
#if CRC_BE_BITS > 8
-# define tobe(x) ((__force u32) __constant_cpu_to_be32(x))
+# define tobe(x) ((__force u32) cpu_to_be32(x))
#else
# define tobe(x) (x)
#endif
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index e0731c3db706..547f7f923dbc 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -7,6 +7,9 @@
*
* For licencing details see kernel-base/COPYING
*/
+
+#define pr_fmt(fmt) "ODEBUG: " fmt
+
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
@@ -218,7 +221,7 @@ static void debug_objects_oom(void)
unsigned long flags;
int i;
- printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
+ pr_warn("Out of memory. ODEBUG disabled\n");
for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
raw_spin_lock_irqsave(&db->lock, flags);
@@ -292,11 +295,9 @@ static void debug_object_is_on_stack(void *addr, int onstack)
limit++;
if (is_on_stack)
- printk(KERN_WARNING
- "ODEBUG: object is on stack, but not annotated\n");
+ pr_warn("object is on stack, but not annotated\n");
else
- printk(KERN_WARNING
- "ODEBUG: object is not on stack, but annotated\n");
+ pr_warn("object is not on stack, but annotated\n");
WARN_ON(1);
}
@@ -985,7 +986,7 @@ static void __init debug_objects_selftest(void)
if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
goto out;
#endif
- printk(KERN_INFO "ODEBUG: selftest passed\n");
+ pr_info("selftest passed\n");
out:
debug_objects_fixups = oldfixups;
@@ -1060,8 +1061,8 @@ static int __init debug_objects_replace_static_objects(void)
}
local_irq_enable();
- printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
- obj_pool_used);
+ pr_debug("%d of %d active objects replaced\n",
+ cnt, obj_pool_used);
return 0;
free:
hlist_for_each_entry_safe(obj, tmp, &objects, node) {
@@ -1090,7 +1091,7 @@ void __init debug_objects_mem_init(void)
debug_objects_enabled = 0;
if (obj_cache)
kmem_cache_destroy(obj_cache);
- printk(KERN_WARNING "ODEBUG: out of memory.\n");
+ pr_warn("out of memory.\n");
} else
debug_objects_selftest();
}
diff --git a/lib/decompress.c b/lib/decompress.c
index 4d1cd0397aab..86069d74c062 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/printk.h>
#ifndef CONFIG_DECOMPRESS_GZIP
# define gunzip NULL
@@ -61,6 +62,8 @@ decompress_fn __init decompress_method(const unsigned char *inbuf, int len,
if (len < 2)
return NULL; /* Need at least this much... */
+ pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]);
+
for (cf = compressed_formats; cf->name; cf++) {
if (!memcmp(inbuf, cf->magic, 2))
break;
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index d619b28c456f..0edfd742a154 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -19,6 +19,7 @@
#include "zlib_inflate/inflate.h"
#include "zlib_inflate/infutil.h"
+#include <linux/decompress/inflate.h>
#endif /* STATIC */
diff --git a/lib/devres.c b/lib/devres.c
index 823533138fa0..f562bf6ff71d 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -81,11 +81,13 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
void devm_iounmap(struct device *dev, void __iomem *addr)
{
WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
- (void *)addr));
+ (__force void *)addr));
iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);
+#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
+
/**
* devm_ioremap_resource() - check, request region, and ioremap resource
* @dev: generic device to handle the resource for
@@ -114,7 +116,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
if (!res || resource_type(res) != IORESOURCE_MEM) {
dev_err(dev, "invalid resource\n");
- return ERR_PTR(-EINVAL);
+ return IOMEM_ERR_PTR(-EINVAL);
}
size = resource_size(res);
@@ -122,7 +124,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
if (!devm_request_mem_region(dev, res->start, size, name)) {
dev_err(dev, "can't request region for resource %pR\n", res);
- return ERR_PTR(-EBUSY);
+ return IOMEM_ERR_PTR(-EBUSY);
}
if (res->flags & IORESOURCE_CACHEABLE)
@@ -133,7 +135,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
if (!dest_ptr) {
dev_err(dev, "ioremap failed for resource %pR\n", res);
devm_release_mem_region(dev, res->start, size);
- dest_ptr = ERR_PTR(-ENOMEM);
+ dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
}
return dest_ptr;
@@ -155,12 +157,12 @@ EXPORT_SYMBOL(devm_ioremap_resource);
* if (!base)
* return -EADDRNOTAVAIL;
*/
-void __iomem *devm_request_and_ioremap(struct device *device,
+void __iomem *devm_request_and_ioremap(struct device *dev,
struct resource *res)
{
void __iomem *dest_ptr;
- dest_ptr = devm_ioremap_resource(device, res);
+ dest_ptr = devm_ioremap_resource(dev, res);
if (IS_ERR(dest_ptr))
return NULL;
@@ -168,7 +170,7 @@ void __iomem *devm_request_and_ioremap(struct device *device,
}
EXPORT_SYMBOL(devm_request_and_ioremap);
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
/*
* Generic iomap devres
*/
@@ -192,7 +194,7 @@ static int devm_ioport_map_match(struct device *dev, void *res,
* Managed ioport_map(). Map is automatically unmapped on driver
* detach.
*/
-void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
+void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
unsigned int nr)
{
void __iomem **ptr, *addr;
@@ -224,10 +226,10 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
ioport_unmap(addr);
WARN_ON(devres_destroy(dev, devm_ioport_map_release,
- devm_ioport_map_match, (void *)addr));
+ devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
-#endif /* CONFIG_HAS_IOPORT */
+#endif /* CONFIG_HAS_IOPORT_MAP */
#ifdef CONFIG_PCI
/*
@@ -263,7 +265,7 @@ static void pcim_iomap_release(struct device *gendev, void *res)
* be safely called without context and guaranteed to succed once
* allocated.
*/
-void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
+void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
struct pcim_iomap_devres *dr, *new_dr;
@@ -288,7 +290,7 @@ EXPORT_SYMBOL(pcim_iomap_table);
* Managed pci_iomap(). Map is automatically unmapped on driver
* detach.
*/
-void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
void __iomem **tbl;
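A minimal consumer sketch, not part of the patch and using a hypothetical "foo" platform driver, showing how the __iomem-typed error pointers produced by IOMEM_ERR_PTR() are checked on the caller side:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))	/* -EINVAL, -EBUSY or -ENOMEM from the helper above */
		return PTR_ERR(base);

	return 0;
}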
diff --git a/lib/digsig.c b/lib/digsig.c
index 8793aeda30ca..ae05ea393fc8 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -175,10 +175,11 @@ err1:
* digsig_verify() - digital signature verification with public key
* @keyring: keyring to search key in
* @sig: digital signature
- * @sigen: length of the signature
+ * @siglen: length of the signature
* @data: data
* @datalen: length of the data
- * @return: 0 on success, -EINVAL otherwise
+ *
+ * Returns 0 on success, -EINVAL otherwise
*
* Verifies data integrity against digital signature.
* Currently only RSA is supported.
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index f23b63f0a1c3..6745c6230db3 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -23,7 +23,7 @@ static void __dump_stack(void)
#ifdef CONFIG_SMP
static atomic_t dump_lock = ATOMIC_INIT(-1);
-asmlinkage void dump_stack(void)
+asmlinkage __visible void dump_stack(void)
{
int was_locked;
int old;
@@ -55,7 +55,7 @@ retry:
preempt_enable();
}
#else
-asmlinkage void dump_stack(void)
+asmlinkage __visible void dump_stack(void)
{
__dump_stack();
}
diff --git a/lib/fdt_empty_tree.c b/lib/fdt_empty_tree.c
new file mode 100644
index 000000000000..5d30c58150ad
--- /dev/null
+++ b/lib/fdt_empty_tree.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_empty_tree.c"
diff --git a/lib/idr.c b/lib/idr.c
index bfe4db4e165f..39158abebad1 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -18,12 +18,6 @@
* pointer or what ever, we treat it as a (void *). You can pass this
* id to a user for him to pass back at a later time. You then pass
* that id to this code and it returns your pointer.
-
- * You can release ids at any time. When all ids are released, most of
- * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
- * don't need to go to the memory "store" during an id allocate, just
- * so you don't need to be too concerned about locking and conflicts
- * with the slab allocator.
*/
#ifndef TEST // to test in user space...
@@ -151,7 +145,7 @@ static void idr_layer_rcu_free(struct rcu_head *head)
static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
- if (idr->hint && idr->hint == p)
+ if (idr->hint == p)
RCU_INIT_POINTER(idr->hint, NULL);
call_rcu(&p->rcu_head, idr_layer_rcu_free);
}
@@ -196,7 +190,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
}
}
-int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
while (idp->id_free_cnt < MAX_IDR_FREE) {
struct idr_layer *new;
@@ -207,7 +201,6 @@ int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
}
return 1;
}
-EXPORT_SYMBOL(__idr_pre_get);
/**
* sub_alloc - try to allocate an id without growing the tree depth
@@ -250,7 +243,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
/* if already at the top layer, we need to grow */
- if (id >= 1 << (idp->layers * IDR_BITS)) {
+ if (id > idr_max(idp->layers)) {
*starting_id = id;
return -EAGAIN;
}
@@ -374,20 +367,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
idr_mark_full(pa, id);
}
-int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
-{
- struct idr_layer *pa[MAX_IDR_LEVEL + 1];
- int rv;
-
- rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
- if (rv < 0)
- return rv == -ENOMEM ? -EAGAIN : rv;
-
- idr_fill_slot(idp, ptr, rv, pa);
- *id = rv;
- return 0;
-}
-EXPORT_SYMBOL(__idr_get_new_above);
/**
* idr_preload - preload for idr_alloc()
@@ -548,7 +527,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
n = id & IDR_MASK;
if (likely(p != NULL && test_bit(n, p->bitmap))) {
__clear_bit(n, p->bitmap);
- rcu_assign_pointer(p->ary[n], NULL);
+ RCU_INIT_POINTER(p->ary[n], NULL);
to_free = NULL;
while(*paa && ! --((**paa)->count)){
if (to_free)
@@ -577,6 +556,11 @@ void idr_remove(struct idr *idp, int id)
if (id < 0)
return;
+ if (id > idr_max(idp->layers)) {
+ idr_remove_warning(id);
+ return;
+ }
+
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
idp->top->ary[0]) {
@@ -594,20 +578,10 @@ void idr_remove(struct idr *idp, int id)
bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
free_layer(idp, to_free);
}
- while (idp->id_free_cnt >= MAX_IDR_FREE) {
- p = get_from_free_list(idp);
- /*
- * Note: we don't call the rcu callback here, since the only
- * layers that fall into the freelist are those that have been
- * preallocated.
- */
- kmem_cache_free(idr_layer_cache, p);
- }
- return;
}
EXPORT_SYMBOL(idr_remove);
-void __idr_remove_all(struct idr *idp)
+static void __idr_remove_all(struct idr *idp)
{
int n, id, max;
int bt_mask;
@@ -617,7 +591,7 @@ void __idr_remove_all(struct idr *idp)
n = idp->layers * IDR_BITS;
p = idp->top;
- rcu_assign_pointer(idp->top, NULL);
+ RCU_INIT_POINTER(idp->top, NULL);
max = idr_max(idp->layers);
id = 0;
@@ -640,7 +614,6 @@ void __idr_remove_all(struct idr *idp)
}
idp->layers = 0;
}
-EXPORT_SYMBOL(__idr_remove_all);
/**
* idr_destroy - release all cached layers within an idr tree
@@ -825,14 +798,12 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
p = idp->top;
if (!p)
- return ERR_PTR(-EINVAL);
-
- n = (p->layer+1) * IDR_BITS;
+ return ERR_PTR(-ENOENT);
- if (id >= (1 << n))
- return ERR_PTR(-EINVAL);
+ if (id > idr_max(p->layer + 1))
+ return ERR_PTR(-ENOENT);
- n -= IDR_BITS;
+ n = p->layer * IDR_BITS;
while ((n > 0) && p) {
p = p->ary[(id >> n) & IDR_MASK];
n -= IDR_BITS;
@@ -869,6 +840,16 @@ void idr_init(struct idr *idp)
}
EXPORT_SYMBOL(idr_init);
+static int idr_has_entry(int id, void *p, void *data)
+{
+ return 1;
+}
+
+bool idr_is_empty(struct idr *idp)
+{
+ return !idr_for_each(idp, idr_has_entry, NULL);
+}
+EXPORT_SYMBOL(idr_is_empty);
/**
* DOC: IDA description
@@ -1033,6 +1014,9 @@ void ida_remove(struct ida *ida, int id)
int n;
struct ida_bitmap *bitmap;
+ if (idr_id > idr_max(ida->idr.layers))
+ goto err;
+
/* clear full bits while looking up the leaf idr_layer */
while ((shift > 0) && p) {
n = (idr_id >> shift) & IDR_MASK;
@@ -1048,7 +1032,7 @@ void ida_remove(struct ida *ida, int id)
__clear_bit(n, p->bitmap);
bitmap = (void *)p->ary[n];
- if (!test_bit(offset, bitmap->bitmap))
+ if (!bitmap || !test_bit(offset, bitmap->bitmap))
goto err;
/* update bitmap and remove it if empty */
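A small sketch, not part of the patch, of the new idr_is_empty() helper; the IDR and the teardown path are hypothetical:

#include <linux/idr.h>
#include <linux/kernel.h>

static DEFINE_IDR(foo_idr);

static void foo_teardown(void)
{
	/* idr_is_empty() walks the tree via idr_for_each(), so this is O(n) */
	WARN_ON(!idr_is_empty(&foo_idr));
	idr_destroy(&foo_idr);
}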
diff --git a/lib/iomap.c b/lib/iomap.c
index 2c08f36862eb..fc3dcb4b238e 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -224,7 +224,7 @@ EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
@@ -239,7 +239,7 @@ void ioport_unmap(void __iomem *addr)
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
-#endif /* CONFIG_HAS_IOPORT */
+#endif /* CONFIG_HAS_IOPORT_MAP */
#ifdef CONFIG_PCI
/* Hide the details if this is a MMIO or PIO address space and just do what
diff --git a/lib/kobject.c b/lib/kobject.c
index cb14aeac4cca..58751bb80a7c 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -94,7 +94,7 @@ static int create_dir(struct kobject *kobj)
BUG_ON(ops->type >= KOBJ_NS_TYPES);
BUG_ON(!kobj_ns_type_registered(ops->type));
- kernfs_enable_ns(kobj->sd);
+ sysfs_enable_ns(kobj->sd);
}
return 0;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 5f72767ddd9b..9ebf9e20de53 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -29,7 +29,9 @@
u64 uevent_seqnum;
+#ifdef CONFIG_UEVENT_HELPER
char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
+#endif
#ifdef CONFIG_NET
struct uevent_sock {
struct list_head list;
@@ -109,6 +111,7 @@ static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
}
#endif
+#ifdef CONFIG_UEVENT_HELPER
static int kobj_usermode_filter(struct kobject *kobj)
{
const struct kobj_ns_type_operations *ops;
@@ -124,6 +127,31 @@ static int kobj_usermode_filter(struct kobject *kobj)
return 0;
}
+static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
+{
+ int len;
+
+ len = strlcpy(&env->buf[env->buflen], subsystem,
+ sizeof(env->buf) - env->buflen);
+ if (len >= (sizeof(env->buf) - env->buflen)) {
+ WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
+ return -ENOMEM;
+ }
+
+ env->argv[0] = uevent_helper;
+ env->argv[1] = &env->buf[env->buflen];
+ env->argv[2] = NULL;
+
+ env->buflen += len + 1;
+ return 0;
+}
+
+static void cleanup_uevent_env(struct subprocess_info *info)
+{
+ kfree(info->data);
+}
+#endif
+
/**
* kobject_uevent_env - send an uevent with environmental data
*
@@ -299,13 +327,11 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
#endif
mutex_unlock(&uevent_sock_mutex);
+#ifdef CONFIG_UEVENT_HELPER
/* call uevent_helper, usually only enabled during early boot */
if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
- char *argv [3];
+ struct subprocess_info *info;
- argv [0] = uevent_helper;
- argv [1] = (char *)subsystem;
- argv [2] = NULL;
retval = add_uevent_var(env, "HOME=/");
if (retval)
goto exit;
@@ -313,10 +339,20 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
"PATH=/sbin:/bin:/usr/sbin:/usr/bin");
if (retval)
goto exit;
+ retval = init_uevent_argv(env, subsystem);
+ if (retval)
+ goto exit;
- retval = call_usermodehelper(argv[0], argv,
- env->envp, UMH_WAIT_EXEC);
+ retval = -ENOMEM;
+ info = call_usermodehelper_setup(env->argv[0], env->argv,
+ env->envp, GFP_KERNEL,
+ NULL, cleanup_uevent_env, env);
+ if (info) {
+ retval = call_usermodehelper_exec(info, UMH_NO_WAIT);
+ env = NULL; /* freed by cleanup_uevent_env */
+ }
}
+#endif
exit:
kfree(devpath);
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 244f5480c898..b3131f5cf8a2 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -62,10 +62,7 @@ EXPORT_SYMBOL(crc32c);
static int __init libcrc32c_mod_init(void)
{
tfm = crypto_alloc_shash("crc32c", 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- return 0;
+ return PTR_ERR_OR_ZERO(tfm);
}
static void __exit libcrc32c_mod_fini(void)
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 18eca7809b08..9c3e85ff0a6c 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -136,6 +136,7 @@ int nla_validate(const struct nlattr *head, int len, int maxtype,
errout:
return err;
}
+EXPORT_SYMBOL(nla_validate);
/**
* nla_policy_len - Determin the max. length of a policy
@@ -162,6 +163,7 @@ nla_policy_len(const struct nla_policy *p, int n)
return len;
}
+EXPORT_SYMBOL(nla_policy_len);
/**
* nla_parse - Parse a stream of attributes into a tb buffer
@@ -201,13 +203,14 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
}
if (unlikely(rem > 0))
- printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
- "attributes.\n", rem);
+ pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
+ rem, current->comm);
err = 0;
errout:
return err;
}
+EXPORT_SYMBOL(nla_parse);
/**
* nla_find - Find a specific attribute in a stream of attributes
@@ -228,6 +231,7 @@ struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
return NULL;
}
+EXPORT_SYMBOL(nla_find);
/**
* nla_strlcpy - Copy string attribute payload into a sized buffer
@@ -258,6 +262,7 @@ size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
return srclen;
}
+EXPORT_SYMBOL(nla_strlcpy);
/**
* nla_memcpy - Copy a netlink attribute into another memory area
@@ -278,6 +283,7 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
return minlen;
}
+EXPORT_SYMBOL(nla_memcpy);
/**
* nla_memcmp - Compare an attribute with sized memory area
@@ -295,6 +301,7 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
return d;
}
+EXPORT_SYMBOL(nla_memcmp);
/**
* nla_strcmp - Compare a string attribute against a string
@@ -303,14 +310,21 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
*/
int nla_strcmp(const struct nlattr *nla, const char *str)
{
- int len = strlen(str) + 1;
- int d = nla_len(nla) - len;
+ int len = strlen(str);
+ char *buf = nla_data(nla);
+ int attrlen = nla_len(nla);
+ int d;
+ if (attrlen > 0 && buf[attrlen - 1] == '\0')
+ attrlen--;
+
+ d = attrlen - len;
if (d == 0)
d = memcmp(nla_data(nla), str, len);
return d;
}
+EXPORT_SYMBOL(nla_strcmp);
#ifdef CONFIG_NET
/**
@@ -496,12 +510,3 @@ int nla_append(struct sk_buff *skb, int attrlen, const void *data)
}
EXPORT_SYMBOL(nla_append);
#endif
-
-EXPORT_SYMBOL(nla_validate);
-EXPORT_SYMBOL(nla_policy_len);
-EXPORT_SYMBOL(nla_parse);
-EXPORT_SYMBOL(nla_find);
-EXPORT_SYMBOL(nla_strlcpy);
-EXPORT_SYMBOL(nla_memcpy);
-EXPORT_SYMBOL(nla_memcmp);
-EXPORT_SYMBOL(nla_strcmp);
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 8280a5dd1727..7dd33577b905 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -169,7 +169,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
struct percpu_counter *fbc;
compute_batch_value();
- if (action != CPU_DEAD)
+ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
return NOTIFY_OK;
cpu = (unsigned long)hcpu;
diff --git a/lib/plist.c b/lib/plist.c
index 1ebc95f7a46f..d408e774b746 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -134,6 +134,46 @@ void plist_del(struct plist_node *node, struct plist_head *head)
plist_check_head(head);
}
+/**
+ * plist_requeue - Requeue @node at end of same-prio entries.
+ *
+ * This is essentially an optimized plist_del() followed by
+ * plist_add(). It moves an entry already in the plist to
+ * after any other same-priority entries.
+ *
+ * @node: &struct plist_node pointer - entry to be moved
+ * @head: &struct plist_head pointer - list head
+ */
+void plist_requeue(struct plist_node *node, struct plist_head *head)
+{
+ struct plist_node *iter;
+ struct list_head *node_next = &head->node_list;
+
+ plist_check_head(head);
+ BUG_ON(plist_head_empty(head));
+ BUG_ON(plist_node_empty(node));
+
+ if (node == plist_last(head))
+ return;
+
+ iter = plist_next(node);
+
+ if (node->prio != iter->prio)
+ return;
+
+ plist_del(node, head);
+
+ plist_for_each_continue(iter, head) {
+ if (node->prio != iter->prio) {
+ node_next = &iter->node_list;
+ break;
+ }
+ }
+ list_add_tail(&node->node_list, node_next);
+
+ plist_check_head(head);
+}
+
#ifdef CONFIG_DEBUG_PI_LIST
#include <linux/sched.h>
#include <linux/module.h>
@@ -170,12 +210,20 @@ static void __init plist_test_check(int nr_expect)
BUG_ON(prio_pos->prio_list.next != &first->prio_list);
}
+static void __init plist_test_requeue(struct plist_node *node)
+{
+ plist_requeue(node, &test_head);
+
+ if (node != plist_last(&test_head))
+ BUG_ON(node->prio == plist_next(node)->prio);
+}
+
static int __init plist_test(void)
{
int nr_expect = 0, i, loop;
unsigned int r = local_clock();
- pr_debug("start plist test\n");
+ printk(KERN_DEBUG "start plist test\n");
plist_head_init(&test_head);
for (i = 0; i < ARRAY_SIZE(test_node); i++)
plist_node_init(test_node + i, 0);
@@ -193,6 +241,10 @@ static int __init plist_test(void)
nr_expect--;
}
plist_test_check(nr_expect);
+ if (!plist_node_empty(test_node + i)) {
+ plist_test_requeue(test_node + i);
+ plist_test_check(nr_expect);
+ }
}
for (i = 0; i < ARRAY_SIZE(test_node); i++) {
@@ -203,7 +255,7 @@ static int __init plist_test(void)
plist_test_check(nr_expect);
}
- pr_debug("end plist test\n");
+ printk(KERN_DEBUG "end plist test\n");
return 0;
}
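A sketch, not part of the patch, of what plist_requeue() enables: round-robin selection among entries that share the highest priority. The helper name is hypothetical:

#include <linux/plist.h>

static struct plist_node *foo_pick_and_rotate(struct plist_head *head)
{
	struct plist_node *node;

	if (plist_head_empty(head))
		return NULL;

	node = plist_first(head);
	/* no-op unless another entry with the same priority follows @node */
	plist_requeue(node, head);
	return node;
}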
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index bd4a8dfdf0b8..3291a8e37490 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -27,6 +27,7 @@
#include <linux/radix-tree.h>
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/kmemleak.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/string.h>
@@ -35,33 +36,6 @@
#include <linux/hardirq.h> /* in_interrupt() */
-#ifdef __KERNEL__
-#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
-#else
-#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */
-#endif
-
-#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
-#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1)
-
-#define RADIX_TREE_TAG_LONGS \
- ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
-
-struct radix_tree_node {
- unsigned int height; /* Height from the bottom */
- unsigned int count;
- union {
- struct radix_tree_node *parent; /* Used when ascending tree */
- struct rcu_head rcu_head; /* Used when freeing node */
- };
- void __rcu *slots[RADIX_TREE_MAP_SIZE];
- unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
-};
-
-#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
-#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
- RADIX_TREE_MAP_SHIFT))
-
/*
* The height_to_maxindex array needs to be one deeper than the maximum
* path as height 0 holds only 1 entry.
@@ -221,12 +195,17 @@ radix_tree_node_alloc(struct radix_tree_root *root)
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
- rtp = &__get_cpu_var(radix_tree_preloads);
+ rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr) {
ret = rtp->nodes[rtp->nr - 1];
rtp->nodes[rtp->nr - 1] = NULL;
rtp->nr--;
}
+ /*
+ * Update the allocation stack trace as this is more useful
+ * for debugging.
+ */
+ kmemleak_update_trace(ret);
}
if (ret == NULL)
ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
@@ -277,14 +256,14 @@ static int __radix_tree_preload(gfp_t gfp_mask)
int ret = -ENOMEM;
preempt_disable();
- rtp = &__get_cpu_var(radix_tree_preloads);
+ rtp = this_cpu_ptr(&radix_tree_preloads);
while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
preempt_enable();
node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
if (node == NULL)
goto out;
preempt_disable();
- rtp = &__get_cpu_var(radix_tree_preloads);
+ rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr < ARRAY_SIZE(rtp->nodes))
rtp->nodes[rtp->nr++] = node;
else
@@ -369,7 +348,8 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
/* Increase the height. */
newheight = root->height+1;
- node->height = newheight;
+ BUG_ON(newheight & ~RADIX_TREE_HEIGHT_MASK);
+ node->path = newheight;
node->count = 1;
node->parent = NULL;
slot = root->rnode;
@@ -387,23 +367,28 @@ out:
}
/**
- * radix_tree_insert - insert into a radix tree
+ * __radix_tree_create - create a slot in a radix tree
* @root: radix tree root
* @index: index key
- * @item: item to insert
+ * @nodep: returns node
+ * @slotp: returns slot
*
- * Insert an item into the radix tree at position @index.
+ * Create, if necessary, and return the node and slot for an item
+ * at position @index in the radix tree @root.
+ *
+ * Until there is more than one item in the tree, no nodes are
+ * allocated and @root->rnode is used as a direct slot instead of
+ * pointing to a node, in which case *@nodep will be NULL.
+ *
+ * Returns -ENOMEM, or 0 for success.
*/
-int radix_tree_insert(struct radix_tree_root *root,
- unsigned long index, void *item)
+int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
+ struct radix_tree_node **nodep, void ***slotp)
{
struct radix_tree_node *node = NULL, *slot;
- unsigned int height, shift;
- int offset;
+ unsigned int height, shift, offset;
int error;
- BUG_ON(radix_tree_is_indirect_ptr(item));
-
/* Make sure the tree is high enough. */
if (index > radix_tree_maxindex(root->height)) {
error = radix_tree_extend(root, index);
@@ -422,11 +407,12 @@ int radix_tree_insert(struct radix_tree_root *root,
/* Have to add a child node. */
if (!(slot = radix_tree_node_alloc(root)))
return -ENOMEM;
- slot->height = height;
+ slot->path = height;
slot->parent = node;
if (node) {
rcu_assign_pointer(node->slots[offset], slot);
node->count++;
+ slot->path |= offset << RADIX_TREE_HEIGHT_SHIFT;
} else
rcu_assign_pointer(root->rnode, ptr_to_indirect(slot));
}
@@ -439,16 +425,42 @@ int radix_tree_insert(struct radix_tree_root *root,
height--;
}
- if (slot != NULL)
+ if (nodep)
+ *nodep = node;
+ if (slotp)
+ *slotp = node ? node->slots + offset : (void **)&root->rnode;
+ return 0;
+}
+
+/**
+ * radix_tree_insert - insert into a radix tree
+ * @root: radix tree root
+ * @index: index key
+ * @item: item to insert
+ *
+ * Insert an item into the radix tree at position @index.
+ */
+int radix_tree_insert(struct radix_tree_root *root,
+ unsigned long index, void *item)
+{
+ struct radix_tree_node *node;
+ void **slot;
+ int error;
+
+ BUG_ON(radix_tree_is_indirect_ptr(item));
+
+ error = __radix_tree_create(root, index, &node, &slot);
+ if (error)
+ return error;
+ if (*slot != NULL)
return -EEXIST;
+ rcu_assign_pointer(*slot, item);
if (node) {
node->count++;
- rcu_assign_pointer(node->slots[offset], item);
- BUG_ON(tag_get(node, 0, offset));
- BUG_ON(tag_get(node, 1, offset));
+ BUG_ON(tag_get(node, 0, index & RADIX_TREE_MAP_MASK));
+ BUG_ON(tag_get(node, 1, index & RADIX_TREE_MAP_MASK));
} else {
- rcu_assign_pointer(root->rnode, item);
BUG_ON(root_tag_get(root, 0));
BUG_ON(root_tag_get(root, 1));
}
@@ -457,15 +469,26 @@ int radix_tree_insert(struct radix_tree_root *root,
}
EXPORT_SYMBOL(radix_tree_insert);
-/*
- * is_slot == 1 : search for the slot.
- * is_slot == 0 : search for the node.
+/**
+ * __radix_tree_lookup - lookup an item in a radix tree
+ * @root: radix tree root
+ * @index: index key
+ * @nodep: returns node
+ * @slotp: returns slot
+ *
+ * Lookup and return the item at position @index in the radix
+ * tree @root.
+ *
+ * Until there is more than one item in the tree, no nodes are
+ * allocated and @root->rnode is used as a direct slot instead of
+ * pointing to a node, in which case *@nodep will be NULL.
*/
-static void *radix_tree_lookup_element(struct radix_tree_root *root,
- unsigned long index, int is_slot)
+void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
+ struct radix_tree_node **nodep, void ***slotp)
{
+ struct radix_tree_node *node, *parent;
unsigned int height, shift;
- struct radix_tree_node *node, **slot;
+ void **slot;
node = rcu_dereference_raw(root->rnode);
if (node == NULL)
@@ -474,19 +497,24 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
if (!radix_tree_is_indirect_ptr(node)) {
if (index > 0)
return NULL;
- return is_slot ? (void *)&root->rnode : node;
+
+ if (nodep)
+ *nodep = NULL;
+ if (slotp)
+ *slotp = (void **)&root->rnode;
+ return node;
}
node = indirect_to_ptr(node);
- height = node->height;
+ height = node->path & RADIX_TREE_HEIGHT_MASK;
if (index > radix_tree_maxindex(height))
return NULL;
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
do {
- slot = (struct radix_tree_node **)
- (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
+ parent = node;
+ slot = node->slots + ((index >> shift) & RADIX_TREE_MAP_MASK);
node = rcu_dereference_raw(*slot);
if (node == NULL)
return NULL;
@@ -495,7 +523,11 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
height--;
} while (height > 0);
- return is_slot ? (void *)slot : indirect_to_ptr(node);
+ if (nodep)
+ *nodep = parent;
+ if (slotp)
+ *slotp = slot;
+ return node;
}
/**
@@ -513,7 +545,11 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
*/
void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
{
- return (void **)radix_tree_lookup_element(root, index, 1);
+ void **slot;
+
+ if (!__radix_tree_lookup(root, index, NULL, &slot))
+ return NULL;
+ return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);
@@ -531,7 +567,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
*/
void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
{
- return radix_tree_lookup_element(root, index, 0);
+ return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);
@@ -676,7 +712,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
return (index == 0);
node = indirect_to_ptr(node);
- height = node->height;
+ height = node->path & RADIX_TREE_HEIGHT_MASK;
if (index > radix_tree_maxindex(height))
return 0;
@@ -713,7 +749,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
{
unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK;
struct radix_tree_node *rnode, *node;
- unsigned long index, offset;
+ unsigned long index, offset, height;
if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
return NULL;
@@ -744,7 +780,8 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
return NULL;
restart:
- shift = (rnode->height - 1) * RADIX_TREE_MAP_SHIFT;
+ height = rnode->path & RADIX_TREE_HEIGHT_MASK;
+ shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
offset = index >> shift;
/* Index outside of the tree */
@@ -946,81 +983,6 @@ next:
}
EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
-
-/**
- * radix_tree_next_hole - find the next hole (not-present entry)
- * @root: tree root
- * @index: index key
- * @max_scan: maximum range to search
- *
- * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the lowest
- * indexed hole.
- *
- * Returns: the index of the hole if found, otherwise returns an index
- * outside of the set specified (in which case 'return - index >= max_scan'
- * will be true). In rare cases of index wrap-around, 0 will be returned.
- *
- * radix_tree_next_hole may be called under rcu_read_lock. However, like
- * radix_tree_gang_lookup, this will not atomically search a snapshot of
- * the tree at a single point in time. For example, if a hole is created
- * at index 5, then subsequently a hole is created at index 10,
- * radix_tree_next_hole covering both indexes may return 10 if called
- * under rcu_read_lock.
- */
-unsigned long radix_tree_next_hole(struct radix_tree_root *root,
- unsigned long index, unsigned long max_scan)
-{
- unsigned long i;
-
- for (i = 0; i < max_scan; i++) {
- if (!radix_tree_lookup(root, index))
- break;
- index++;
- if (index == 0)
- break;
- }
-
- return index;
-}
-EXPORT_SYMBOL(radix_tree_next_hole);
-
-/**
- * radix_tree_prev_hole - find the prev hole (not-present entry)
- * @root: tree root
- * @index: index key
- * @max_scan: maximum range to search
- *
- * Search backwards in the range [max(index-max_scan+1, 0), index]
- * for the first hole.
- *
- * Returns: the index of the hole if found, otherwise returns an index
- * outside of the set specified (in which case 'index - return >= max_scan'
- * will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
- *
- * radix_tree_next_hole may be called under rcu_read_lock. However, like
- * radix_tree_gang_lookup, this will not atomically search a snapshot of
- * the tree at a single point in time. For example, if a hole is created
- * at index 10, then subsequently a hole is created at index 5,
- * radix_tree_prev_hole covering both indexes may return 5 if called under
- * rcu_read_lock.
- */
-unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
- unsigned long index, unsigned long max_scan)
-{
- unsigned long i;
-
- for (i = 0; i < max_scan; i++) {
- if (!radix_tree_lookup(root, index))
- break;
- index--;
- if (index == ULONG_MAX)
- break;
- }
-
- return index;
-}
-EXPORT_SYMBOL(radix_tree_prev_hole);
-
/**
* radix_tree_gang_lookup - perform multiple lookup on a radix tree
* @root: radix tree root
@@ -1189,7 +1151,7 @@ static unsigned long __locate(struct radix_tree_node *slot, void *item,
unsigned int shift, height;
unsigned long i;
- height = slot->height;
+ height = slot->path & RADIX_TREE_HEIGHT_MASK;
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
for ( ; height > 1; height--) {
@@ -1252,7 +1214,8 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
}
node = indirect_to_ptr(node);
- max_index = radix_tree_maxindex(node->height);
+ max_index = radix_tree_maxindex(node->path &
+ RADIX_TREE_HEIGHT_MASK);
if (cur_index > max_index) {
rcu_read_unlock();
break;
@@ -1337,48 +1300,89 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
}
/**
- * radix_tree_delete - delete an item from a radix tree
+ * __radix_tree_delete_node - try to free node after clearing a slot
+ * @root: radix tree root
+ * @node: node containing @index
+ *
+ * After clearing the slot at @index in @node from radix tree
+ * rooted at @root, call this function to attempt freeing the
+ * node and shrinking the tree.
+ *
+ * Returns %true if @node was freed, %false otherwise.
+ */
+bool __radix_tree_delete_node(struct radix_tree_root *root,
+ struct radix_tree_node *node)
+{
+ bool deleted = false;
+
+ do {
+ struct radix_tree_node *parent;
+
+ if (node->count) {
+ if (node == indirect_to_ptr(root->rnode)) {
+ radix_tree_shrink(root);
+ if (root->height == 0)
+ deleted = true;
+ }
+ return deleted;
+ }
+
+ parent = node->parent;
+ if (parent) {
+ unsigned int offset;
+
+ offset = node->path >> RADIX_TREE_HEIGHT_SHIFT;
+ parent->slots[offset] = NULL;
+ parent->count--;
+ } else {
+ root_tag_clear_all(root);
+ root->height = 0;
+ root->rnode = NULL;
+ }
+
+ radix_tree_node_free(node);
+ deleted = true;
+
+ node = parent;
+ } while (node);
+
+ return deleted;
+}
+
+/**
+ * radix_tree_delete_item - delete an item from a radix tree
* @root: radix tree root
* @index: index key
+ * @item: expected item
*
- * Remove the item at @index from the radix tree rooted at @root.
+ * Remove @item at @index from the radix tree rooted at @root.
*
- * Returns the address of the deleted item, or NULL if it was not present.
+ * Returns the address of the deleted item, or NULL if it was not present
+ * or the entry at the given @index was not @item.
*/
-void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+void *radix_tree_delete_item(struct radix_tree_root *root,
+ unsigned long index, void *item)
{
- struct radix_tree_node *node = NULL;
- struct radix_tree_node *slot = NULL;
- struct radix_tree_node *to_free;
- unsigned int height, shift;
+ struct radix_tree_node *node;
+ unsigned int offset;
+ void **slot;
+ void *entry;
int tag;
- int uninitialized_var(offset);
- height = root->height;
- if (index > radix_tree_maxindex(height))
- goto out;
+ entry = __radix_tree_lookup(root, index, &node, &slot);
+ if (!entry)
+ return NULL;
- slot = root->rnode;
- if (height == 0) {
+ if (item && entry != item)
+ return NULL;
+
+ if (!node) {
root_tag_clear_all(root);
root->rnode = NULL;
- goto out;
+ return entry;
}
- slot = indirect_to_ptr(slot);
- shift = height * RADIX_TREE_MAP_SHIFT;
- do {
- if (slot == NULL)
- goto out;
-
- shift -= RADIX_TREE_MAP_SHIFT;
- offset = (index >> shift) & RADIX_TREE_MAP_MASK;
- node = slot;
- slot = slot->slots[offset];
- } while (shift);
-
- if (slot == NULL)
- goto out;
+ offset = index & RADIX_TREE_MAP_MASK;
/*
* Clear all tags associated with the item to be deleted.
@@ -1389,40 +1393,27 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
radix_tree_tag_clear(root, index, tag);
}
- to_free = NULL;
- /* Now free the nodes we do not need anymore */
- while (node) {
- node->slots[offset] = NULL;
- node->count--;
- /*
- * Queue the node for deferred freeing after the
- * last reference to it disappears (set NULL, above).
- */
- if (to_free)
- radix_tree_node_free(to_free);
-
- if (node->count) {
- if (node == indirect_to_ptr(root->rnode))
- radix_tree_shrink(root);
- goto out;
- }
-
- /* Node with zero slots in use so free it */
- to_free = node;
+ node->slots[offset] = NULL;
+ node->count--;
- index >>= RADIX_TREE_MAP_SHIFT;
- offset = index & RADIX_TREE_MAP_MASK;
- node = node->parent;
- }
+ __radix_tree_delete_node(root, node);
- root_tag_clear_all(root);
- root->height = 0;
- root->rnode = NULL;
- if (to_free)
- radix_tree_node_free(to_free);
+ return entry;
+}
+EXPORT_SYMBOL(radix_tree_delete_item);
-out:
- return slot;
+/**
+ * radix_tree_delete - delete an item from a radix tree
+ * @root: radix tree root
+ * @index: index key
+ *
+ * Remove the item at @index from the radix tree rooted at @root.
+ *
+ * Returns the address of the deleted item, or NULL if it was not present.
+ */
+void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+{
+ return radix_tree_delete_item(root, index, NULL);
}
EXPORT_SYMBOL(radix_tree_delete);
@@ -1438,9 +1429,12 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
EXPORT_SYMBOL(radix_tree_tagged);
static void
-radix_tree_node_ctor(void *node)
+radix_tree_node_ctor(void *arg)
{
- memset(node, 0, sizeof(struct radix_tree_node));
+ struct radix_tree_node *node = arg;
+
+ memset(node, 0, sizeof(*node));
+ INIT_LIST_HEAD(&node->private_list);
}
static __init unsigned long __maxindex(unsigned int height)
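A sketch, not part of the patch, of the new radix_tree_delete_item() entry point, which removes the slot only if it still holds the expected item; the lock and helper are hypothetical and @item is assumed non-NULL:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_lock);

static bool foo_try_delete(struct radix_tree_root *root, unsigned long index,
			   void *item)
{
	void *old;

	spin_lock(&foo_lock);
	old = radix_tree_delete_item(root, index, item);
	spin_unlock(&foo_lock);

	/* NULL: nothing there, or the slot held something other than @item */
	return old == item;
}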
diff --git a/lib/random32.c b/lib/random32.c
index 614896778700..fa5da61ce7ad 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -1,37 +1,35 @@
/*
- This is a maximally equidistributed combined Tausworthe generator
- based on code from GNU Scientific Library 1.5 (30 Jun 2004)
-
- lfsr113 version:
-
- x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
-
- s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13))
- s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27))
- s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21))
- s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12))
-
- The period of this generator is about 2^113 (see erratum paper).
-
- From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
- Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
- http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
- ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
-
- There is an erratum in the paper "Tables of Maximally
- Equidistributed Combined LFSR Generators", Mathematics of
- Computation, 68, 225 (1999), 261--269:
- http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
-
- ... the k_j most significant bits of z_j must be non-
- zero, for each j. (Note: this restriction also applies to the
- computer code given in [4], but was mistakenly not mentioned in
- that paper.)
-
- This affects the seeding procedure by imposing the requirement
- s1 > 1, s2 > 7, s3 > 15, s4 > 127.
-
-*/
+ * This is a maximally equidistributed combined Tausworthe generator
+ * based on code from GNU Scientific Library 1.5 (30 Jun 2004)
+ *
+ * lfsr113 version:
+ *
+ * x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
+ *
+ * s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13))
+ * s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27))
+ * s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21))
+ * s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12))
+ *
+ * The period of this generator is about 2^113 (see erratum paper).
+ *
+ * From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
+ * Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
+ * http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
+ * ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
+ *
+ * There is an erratum in the paper "Tables of Maximally Equidistributed
+ * Combined LFSR Generators", Mathematics of Computation, 68, 225 (1999),
+ * 261--269: http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
+ *
+ * ... the k_j most significant bits of z_j must be non-zero,
+ * for each j. (Note: this restriction also applies to the
+ * computer code given in [4], but was mistakenly not mentioned
+ * in that paper.)
+ *
+ * This affects the seeding procedure by imposing the requirement
+ * s1 > 1, s2 > 7, s3 > 15, s4 > 127.
+ */
#include <linux/types.h>
#include <linux/percpu.h>
@@ -75,15 +73,17 @@ EXPORT_SYMBOL(prandom_u32_state);
*/
u32 prandom_u32(void)
{
- unsigned long r;
struct rnd_state *state = &get_cpu_var(net_rand_state);
- r = prandom_u32_state(state);
+ u32 res;
+
+ res = prandom_u32_state(state);
put_cpu_var(state);
- return r;
+
+ return res;
}
EXPORT_SYMBOL(prandom_u32);
-/*
+/**
* prandom_bytes_state - get the requested number of pseudo-random bytes
*
* @state: pointer to state structure holding seeded state.
@@ -204,6 +204,7 @@ static int __init prandom_init(void)
prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy());
prandom_warmup(state);
}
+
return 0;
}
core_initcall(prandom_init);
@@ -259,6 +260,7 @@ static void __prandom_reseed(bool late)
if (latch && !late)
goto out;
+
latch = true;
for_each_possible_cpu(i) {
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 04abe53f12a1..1afec32de6f2 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -7,7 +7,8 @@
#include <linux/kallsyms.h>
#include <linux/sched.h>
-notrace unsigned int debug_smp_processor_id(void)
+notrace static unsigned int check_preemption_disabled(const char *what1,
+ const char *what2)
{
int this_cpu = raw_smp_processor_id();
@@ -38,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void)
if (!printk_ratelimit())
goto out_enable;
- printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
- "code: %s/%d\n",
- preempt_count() - 1, current->comm, current->pid);
+ printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
+ what1, what2, preempt_count() - 1, current->comm, current->pid);
+
print_symbol("caller is %s\n", (long)__builtin_return_address(0));
dump_stack();
@@ -50,5 +51,14 @@ out:
return this_cpu;
}
+notrace unsigned int debug_smp_processor_id(void)
+{
+ return check_preemption_disabled("smp_processor_id", "");
+}
EXPORT_SYMBOL(debug_smp_processor_id);
+notrace void __this_cpu_preempt_check(const char *op)
+{
+ check_preemption_disabled("__this_cpu_", op);
+}
+EXPORT_SYMBOL(__this_cpu_preempt_check);
diff --git a/lib/string.c b/lib/string.c
index e5878de4f101..992bf30af759 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -107,7 +107,7 @@ EXPORT_SYMBOL(strcpy);
#ifndef __HAVE_ARCH_STRNCPY
/**
- * strncpy - Copy a length-limited, %NUL-terminated string
+ * strncpy - Copy a length-limited C-string
* @dest: Where to copy the string to
* @src: Where to copy the string from
* @count: The maximum number of bytes to copy
@@ -136,7 +136,7 @@ EXPORT_SYMBOL(strncpy);
#ifndef __HAVE_ARCH_STRLCPY
/**
- * strlcpy - Copy a %NUL terminated string into a sized buffer
+ * strlcpy - Copy a C-string into a sized buffer
* @dest: Where to copy the string to
* @src: Where to copy the string from
* @size: size of destination buffer
@@ -182,7 +182,7 @@ EXPORT_SYMBOL(strcat);
#ifndef __HAVE_ARCH_STRNCAT
/**
- * strncat - Append a length-limited, %NUL-terminated string to another
+ * strncat - Append a length-limited C-string to another
* @dest: The string to be appended to
* @src: The string to append to it
* @count: The maximum numbers of bytes to copy
@@ -211,7 +211,7 @@ EXPORT_SYMBOL(strncat);
#ifndef __HAVE_ARCH_STRLCAT
/**
- * strlcat - Append a length-limited, %NUL-terminated string to another
+ * strlcat - Append a length-limited C-string to another
* @dest: The string to be appended to
* @src: The string to append to it
* @count: The size of the destination buffer.
@@ -301,6 +301,24 @@ char *strchr(const char *s, int c)
EXPORT_SYMBOL(strchr);
#endif
+#ifndef __HAVE_ARCH_STRCHRNUL
+/**
+ * strchrnul - Find and return a character in a string, or end of string
+ * @s: The string to be searched
+ * @c: The character to search for
+ *
+ * Returns a pointer to the first occurrence of 'c' in s. If c is not found,
+ * returns a pointer to the null byte at the end of s.
+ */
+char *strchrnul(const char *s, int c)
+{
+ while (*s && *s != (char)c)
+ s++;
+ return (char *)s;
+}
+EXPORT_SYMBOL(strchrnul);
+#endif
+
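[Editor's note] A brief usage sketch for the new helper (input and variable names are illustrative): because strchrnul() never returns NULL, a caller can take the key length of a "key=value" option without a separate strlen() or NULL check.

	const char *opt = "mode=fast";		/* illustrative input */
	const char *p = strchrnul(opt, '=');	/* points at '=' or at the trailing NUL */
	size_t keylen = p - opt;		/* 4 here; also correct when '=' is absent */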
#ifndef __HAVE_ARCH_STRRCHR
/**
* strrchr - Find the last occurrence of a character in a string
@@ -648,7 +666,7 @@ EXPORT_SYMBOL(memmove);
* @count: The size of the area.
*/
#undef memcmp
-int memcmp(const void *cs, const void *ct, size_t count)
+__visible int memcmp(const void *cs, const void *ct, size_t count)
{
const unsigned char *su1, *su2;
int res = 0;
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index b604b831f4d1..649d097853a1 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -374,7 +374,7 @@ void __init swiotlb_free(void)
io_tlb_nslabs = 0;
}
-static int is_swiotlb_buffer(phys_addr_t paddr)
+int is_swiotlb_buffer(phys_addr_t paddr)
{
return paddr >= io_tlb_start && paddr < io_tlb_end;
}
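[Editor's note] With the static dropped, code outside lib/swiotlb.c can test whether a physical address lies in the bounce-buffer region, assuming a matching declaration is visible in a header. An illustrative check (vaddr is a made-up variable):

	/* Illustrative only: skip extra handling for addresses that are not
	 * part of the software-IOMMU bounce buffer. */
	if (!is_swiotlb_buffer(virt_to_phys(vaddr)))
		return;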
diff --git a/lib/syscall.c b/lib/syscall.c
index 58710eefeac8..e30e03932480 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -72,4 +72,3 @@ int task_current_syscall(struct task_struct *target, long *callno,
return 0;
}
-EXPORT_SYMBOL_GPL(task_current_syscall);
diff --git a/lib/textsearch.c b/lib/textsearch.c
index e0cc0146ae62..0c7e9ab2d88f 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -159,6 +159,7 @@ errout:
spin_unlock(&ts_mod_lock);
return err;
}
+EXPORT_SYMBOL(textsearch_register);
/**
* textsearch_unregister - unregister a textsearch module
@@ -190,6 +191,7 @@ out:
spin_unlock(&ts_mod_lock);
return err;
}
+EXPORT_SYMBOL(textsearch_unregister);
struct ts_linear_state
{
@@ -236,6 +238,7 @@ unsigned int textsearch_find_continuous(struct ts_config *conf,
return textsearch_find(conf, state);
}
+EXPORT_SYMBOL(textsearch_find_continuous);
/**
* textsearch_prepare - Prepare a search
@@ -298,6 +301,7 @@ errout:
return ERR_PTR(err);
}
+EXPORT_SYMBOL(textsearch_prepare);
/**
* textsearch_destroy - destroy a search configuration
@@ -316,9 +320,4 @@ void textsearch_destroy(struct ts_config *conf)
kfree(conf);
}
-
-EXPORT_SYMBOL(textsearch_register);
-EXPORT_SYMBOL(textsearch_unregister);
-EXPORT_SYMBOL(textsearch_prepare);
-EXPORT_SYMBOL(textsearch_find_continuous);
EXPORT_SYMBOL(textsearch_destroy);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 185b6d300ebc..6fe2c84eb055 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -364,7 +364,6 @@ enum format_type {
FORMAT_TYPE_SHORT,
FORMAT_TYPE_UINT,
FORMAT_TYPE_INT,
- FORMAT_TYPE_NRCHARS,
FORMAT_TYPE_SIZE_T,
FORMAT_TYPE_PTRDIFF
};
@@ -719,10 +718,15 @@ char *resource_string(char *buf, char *end, struct resource *res,
specp = &mem_spec;
decode = 0;
}
- p = number(p, pend, res->start, *specp);
- if (res->start != res->end) {
- *p++ = '-';
- p = number(p, pend, res->end, *specp);
+ if (decode && res->flags & IORESOURCE_UNSET) {
+ p = string(p, pend, "size ", str_spec);
+ p = number(p, pend, resource_size(res), *specp);
+ } else {
+ p = number(p, pend, res->start, *specp);
+ if (res->start != res->end) {
+ *p++ = '-';
+ p = number(p, pend, res->end, *specp);
+ }
}
if (decode) {
if (res->flags & IORESOURCE_MEM_64)
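[Editor's note] As a hedged example of the effect (values illustrative): a not-yet-assigned resource printed via %pR now reports its size, e.g. "[mem size 0x20000]", instead of a meaningless start/end pair, while assigned resources keep the usual range form.

	/* Illustrative only: res is an unassigned struct resource with
	 * IORESOURCE_UNSET set; this prints the size, not a bogus range. */
	printk(KERN_INFO "window: %pR\n", &res);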
@@ -1533,10 +1537,6 @@ qualifier:
return fmt - start;
/* skip alnum */
- case 'n':
- spec->type = FORMAT_TYPE_NRCHARS;
- return ++fmt - start;
-
case '%':
spec->type = FORMAT_TYPE_PERCENT_CHAR;
return ++fmt - start;
@@ -1559,6 +1559,15 @@ qualifier:
case 'u':
break;
+ case 'n':
+ /*
+ * Since %n poses a greater security risk than utility, treat
+ * it as an invalid format specifier. Warn about its use so
+ * that new instances don't get added.
+ */
+ WARN_ONCE(1, "Please remove ignored %%n in '%s'\n", fmt);
+ /* Fall-through */
+
default:
spec->type = FORMAT_TYPE_INVALID;
return fmt - start;
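[Editor's note] The net effect, as a hedged example (buffer and variable are illustrative): a leftover %n user now trips the warning at format-decode time and the count is never stored.

	int n = 0;
	char buf[32];

	/* Illustrative only: warns once ("Please remove ignored %n in ...")
	 * and leaves n untouched; %n is handled as an invalid specifier. */
	snprintf(buf, sizeof(buf), "hello%n", &n);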
@@ -1732,20 +1741,6 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
++str;
break;
- case FORMAT_TYPE_NRCHARS: {
- /*
- * Since %n poses a greater security risk than
- * utility, ignore %n and skip its argument.
- */
- void *skip_arg;
-
- WARN_ONCE(1, "Please remove ignored %%n in '%s'\n",
- old_fmt);
-
- skip_arg = va_arg(args, void *);
- break;
- }
-
default:
switch (spec.type) {
case FORMAT_TYPE_LONG_LONG:
@@ -2020,19 +2015,6 @@ do { \
fmt++;
break;
- case FORMAT_TYPE_NRCHARS: {
- /* skip %n 's argument */
- u8 qualifier = spec.qualifier;
- void *skip_arg;
- if (qualifier == 'l')
- skip_arg = va_arg(args, long *);
- else if (_tolower(qualifier) == 'z')
- skip_arg = va_arg(args, size_t *);
- else
- skip_arg = va_arg(args, int *);
- break;
- }
-
default:
switch (spec.type) {
@@ -2191,10 +2173,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
++str;
break;
- case FORMAT_TYPE_NRCHARS:
- /* skip */
- break;
-
default: {
unsigned long long num;
@@ -2369,7 +2347,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
break;
base = 10;
- is_sign = 0;
+ is_sign = false;
switch (*fmt++) {
case 'c':
@@ -2408,7 +2386,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
case 'i':
base = 0;
case 'd':
- is_sign = 1;
+ is_sign = true;
case 'u':
break;
case '%':
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index 08837db52d94..12d2d777f36b 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -9,33 +9,33 @@ config XZ_DEC
if XZ_DEC
config XZ_DEC_X86
- bool "x86 BCJ filter decoder"
- default y if X86
+ bool "x86 BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
config XZ_DEC_POWERPC
- bool "PowerPC BCJ filter decoder"
- default y if PPC
+ bool "PowerPC BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
config XZ_DEC_IA64
- bool "IA-64 BCJ filter decoder"
- default y if IA64
+ bool "IA-64 BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
config XZ_DEC_ARM
- bool "ARM BCJ filter decoder"
- default y if ARM
+ bool "ARM BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
config XZ_DEC_ARMTHUMB
- bool "ARM-Thumb BCJ filter decoder"
- default y if (ARM && ARM_THUMB)
+ bool "ARM-Thumb BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
config XZ_DEC_SPARC
- bool "SPARC BCJ filter decoder"
- default y if SPARC
+ bool "SPARC BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
endif
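[Editor's note] On the Kconfig change above: a prompt guarded by "if EXPERT" is only offered when CONFIG_EXPERT is enabled, so with "default y" the BCJ filter decoders are now built on every architecture by default, while expert configurations can still switch individual ones off.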
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index a6cdc969ea42..08c3c8049998 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -1043,6 +1043,8 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
s->lzma2.sequence = SEQ_LZMA_PREPARE;
+ /* Fall through */
+
case SEQ_LZMA_PREPARE:
if (s->lzma2.compressed < RC_INIT_BYTES)
return XZ_DATA_ERROR;
@@ -1053,6 +1055,8 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
s->lzma2.compressed -= RC_INIT_BYTES;
s->lzma2.sequence = SEQ_LZMA_RUN;
+ /* Fall through */
+
case SEQ_LZMA_RUN:
/*
* Set dictionary limit to indicate how much we want