Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig | 57
-rw-r--r--  lib/Kconfig.debug | 95
-rw-r--r--  lib/Makefile | 12
-rw-r--r--  lib/asn1_decoder.c | 2
-rw-r--r--  lib/atomic64_test.c | 13
-rw-r--r--  lib/bitmap.c | 111
-rw-r--r--  lib/btree.c | 1
-rw-r--r--  lib/bug.c | 21
-rw-r--r--  lib/cmdline.c | 15
-rw-r--r--  lib/cpumask.c | 63
-rw-r--r--  lib/crc32.c | 157
-rw-r--r--  lib/crc7.c | 84
-rw-r--r--  lib/debugobjects.c | 19
-rw-r--r--  lib/decompress.c | 2
-rw-r--r--  lib/decompress_bunzip2.c | 26
-rw-r--r--  lib/decompress_inflate.c | 12
-rw-r--r--  lib/decompress_unlz4.c | 83
-rw-r--r--  lib/decompress_unlzma.c | 28
-rw-r--r--  lib/decompress_unlzo.c | 12
-rw-r--r--  lib/decompress_unxz.c | 10
-rw-r--r--  lib/devres.c | 36
-rw-r--r--  lib/digsig.c | 5
-rw-r--r--  lib/dynamic_debug.c | 8
-rw-r--r--  lib/fdt_empty_tree.c | 2
-rw-r--r--  lib/glob.c | 287
-rw-r--r--  lib/idr.c | 65
-rw-r--r--  lib/interval_tree.c | 6
-rw-r--r--  lib/interval_tree_test.c (renamed from lib/interval_tree_test_main.c) | 0
-rw-r--r--  lib/iovec.c | 59
-rw-r--r--  lib/kfifo.c | 6
-rw-r--r--  lib/klist.c | 6
-rw-r--r--  lib/kobject_uevent.c | 6
-rw-r--r--  lib/libcrc32c.c | 5
-rw-r--r--  lib/list_sort.c | 71
-rw-r--r--  lib/lockref.c | 3
-rw-r--r--  lib/lz4/lz4_decompress.c | 12
-rw-r--r--  lib/lzo/lzo1x_decompress_safe.c | 62
-rw-r--r--  lib/net_utils.c | 10
-rw-r--r--  lib/nlattr.c | 17
-rw-r--r--  lib/percpu-refcount.c | 86
-rw-r--r--  lib/plist.c | 56
-rw-r--r--  lib/radix-tree.c | 13
-rw-r--r--  lib/random32.c | 49
-rw-r--r--  lib/rbtree.c | 2
-rw-r--r--  lib/rhashtable.c | 797
-rw-r--r--  lib/scatterlist.c | 29
-rw-r--r--  lib/string.c | 26
-rw-r--r--  lib/string_helpers.c | 15
-rw-r--r--  lib/swiotlb.c | 30
-rw-r--r--  lib/test-kstrtox.c | 2
-rw-r--r--  lib/test_bpf.c | 1929
-rw-r--r--  lib/test_firmware.c | 117
-rw-r--r--  lib/textsearch.c | 9
-rw-r--r--  lib/vsprintf.c | 4
-rw-r--r--  lib/xz/Kconfig | 24
-rw-r--r--  lib/xz/xz_dec_lzma2.c | 4
-rw-r--r--  lib/zlib_deflate/deflate.c | 143
-rw-r--r--  lib/zlib_inflate/inflate.c | 132
58 files changed, 4098 insertions, 858 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 4771fb3f4da4..a5ce0c7f6c30 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -331,6 +331,20 @@ config TEXTSEARCH_FSM
config BTREE
boolean
+config INTERVAL_TREE
+ boolean
+ help
+ Simple, embeddable, interval-tree. Can find the start of an
+ overlapping range in log(n) time and then iterate over all
+ overlapping nodes. The algorithm is implemented as an
+ augmented rbtree.
+
+ See:
+
+ Documentation/rbtree.txt
+
+ for more information.
+
config ASSOCIATIVE_ARRAY
bool
help
@@ -382,6 +396,39 @@ config CPU_RMAP
config DQL
bool
+config GLOB
+ bool
+# This actually supports modular compilation, but the module overhead
+# is ridiculous for the amount of code involved. Until an out-of-tree
+# driver asks for it, we'll just link it directly into the kernel
+# when required. Since we're ignoring out-of-tree users, there's also
+# no need to bother prompting for a manual decision:
+# prompt "glob_match() function"
+ help
+ This option provides a glob_match function for performing
+ simple text pattern matching. It originated in the ATA code
+ to blacklist particular drive models, but other device drivers
+ may need similar functionality.
+
+ All drivers in the Linux kernel tree that require this function
+ should automatically select this option. Say N unless you
+ are compiling an out-of-tree driver which tells you that it
+ depends on this.
+
+config GLOB_SELFTEST
+ bool "glob self-test on init"
+ default n
+ depends on GLOB
+ help
+ This option enables a simple self-test of the glob_match
+ function on startup. It is primarily useful for people
+ working on the code to ensure they haven't introduced any
+ regressions.
+
+ It only adds a little bit of code and slows kernel boot (or
+ module load) by a small amount, so you're welcome to play with
+ it, but you probably don't need it.
+
#
# Netlink attribute parsing support is select'ed if needed
#
@@ -437,7 +484,8 @@ config MPILIB
config SIGNATURE
tristate
- depends on KEYS && CRYPTO
+ depends on KEYS
+ select CRYPTO
select CRYPTO_SHA1
select MPILIB
help
@@ -460,4 +508,11 @@ config UCS2_STRING
source "lib/fonts/Kconfig"
+#
+# sg chaining option
+#
+
+config ARCH_HAS_SG_CHAIN
+ def_bool n
+
endmenu
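
The INTERVAL_TREE help above only describes the library loosely. A minimal usage sketch, assuming the include/linux/interval_tree.h interface of this kernel generation (interval_tree_insert() plus the iter_first/iter_next pair over a plain struct rb_root); the function and variable names here are illustrative, not code from this patch:

#include <linux/interval_tree.h>
#include <linux/printk.h>

static struct rb_root my_ranges = RB_ROOT;

/* Intervals are closed: a node covers [start, last] inclusive. */
static void track_range(struct interval_tree_node *node,
			unsigned long start, unsigned long last)
{
	node->start = start;
	node->last = last;
	interval_tree_insert(node, &my_ranges);
}

/* Visit every tracked node overlapping [start, last]. */
static void dump_overlaps(unsigned long start, unsigned long last)
{
	struct interval_tree_node *node;

	for (node = interval_tree_iter_first(&my_ranges, start, last);
	     node;
	     node = interval_tree_iter_next(node, start, last))
		pr_info("overlap: [%lu, %lu]\n", node->start, node->last);
}

The iter_first() call is the log(n) lookup the help text mentions; iter_next() then walks the remaining overlapping nodes.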
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 819ac51202c0..cb45f59685e6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -15,7 +15,7 @@ config PRINTK_TIME
The behavior is also controlled by the kernel command line
parameter printk.time=1. See Documentation/kernel-parameters.txt
-config DEFAULT_MESSAGE_LOGLEVEL
+config MESSAGE_LOGLEVEL_DEFAULT
int "Default message log level (1-7)"
range 1 7
default "4"
@@ -501,6 +501,16 @@ config DEBUG_VM
If unsure, say N.
+config DEBUG_VM_VMACACHE
+ bool "Debug VMA caching"
+ depends on DEBUG_VM
+ help
+ Enable this to turn on VMA caching debug information. Doing so
+ can cause significant overhead, so only enable it in non-production
+ environments.
+
+ If unsure, say N.
+
config DEBUG_VM_RB
bool "Debug VM red-black trees"
depends on DEBUG_VM
@@ -575,8 +585,8 @@ config DEBUG_HIGHMEM
bool "Highmem debugging"
depends on DEBUG_KERNEL && HIGHMEM
help
- This options enables addition error checking for high memory systems.
- Disable for production systems.
+ This option enables additional error checking for high memory
+ systems. Disable for production systems.
config HAVE_DEBUG_STACKOVERFLOW
bool
@@ -823,14 +833,9 @@ config DEBUG_RT_MUTEXES
This allows rt mutex semantics violations and rt mutex related
deadlocks (lockups) to be detected and reported automatically.
-config DEBUG_PI_LIST
- bool
- default y
- depends on DEBUG_RT_MUTEXES
-
config RT_MUTEX_TESTER
bool "Built-in scriptable tester for rt-mutexes"
- depends on DEBUG_KERNEL && RT_MUTEXES
+ depends on DEBUG_KERNEL && RT_MUTEXES && BROKEN
help
This option enables a rt-mutex tester.
@@ -925,7 +930,7 @@ config LOCKDEP
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC
+ select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE
select KALLSYMS
select KALLSYMS_ALL
@@ -1053,6 +1058,16 @@ config DEBUG_LIST
If unsure, say N.
+config DEBUG_PI_LIST
+ bool "Debug priority linked list manipulation"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to turn on extended checks in the priority-ordered
+ linked-list (plist) walking routines. This checks the entire
+ list multiple times during each manipulation.
+
+ If unsure, say N.
+
config DEBUG_SG
bool "Debug SG table operations"
depends on DEBUG_KERNEL
@@ -1116,20 +1131,6 @@ config PROVE_RCU_REPEATEDLY
Say N if you are unsure.
-config PROVE_RCU_DELAY
- bool "RCU debugging: preemptible RCU race provocation"
- depends on DEBUG_KERNEL && PREEMPT_RCU
- default n
- help
- There is a class of races that involve an unlikely preemption
- of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
- been set to INT_MIN. This feature inserts a delay at that
- point to increase the probability of these races.
-
- Say Y to increase probability of preemption of __rcu_read_unlock().
-
- Say N if you are unsure.
-
config SPARSE_RCU_POINTER
bool "RCU debugging: sparse-based checks for pointer usage"
default n
@@ -1393,7 +1394,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE
help
Provide stacktrace filter for fault-injection capabilities
@@ -1496,6 +1497,7 @@ config RBTREE_TEST
config INTERVAL_TREE_TEST
tristate "Interval tree test"
depends on m && DEBUG_KERNEL
+ select INTERVAL_TREE
help
A benchmark measuring the performance of the interval tree library
@@ -1534,6 +1536,14 @@ config TEST_STRING_HELPERS
config TEST_KSTRTOX
tristate "Test kstrto*() family of functions at runtime"
+config TEST_RHASHTABLE
+ bool "Perform selftest on resizable hash table"
+ default n
+ help
+ Enable this option to test the rhashtable functions at boot.
+
+ If unsure, say N.
+
endmenu # runtime tests
config PROVIDE_OHCI1394_DMA_INIT
@@ -1620,6 +1630,41 @@ config TEST_USER_COPY
If unsure, say N.
+config TEST_BPF
+ tristate "Test BPF filter functionality"
+ default n
+ depends on m && NET
+ help
+ This builds the "test_bpf" module that runs various test vectors
+ against the BPF interpreter or BPF JIT compiler depending on the
+ current setting. This is particularly useful for BPF JIT compiler
+ development, but also to run regression tests against changes in
+ the interpreter code.
+
+ If unsure, say N.
+
+config TEST_FIRMWARE
+ tristate "Test firmware loading via userspace interface"
+ default n
+ depends on FW_LOADER
+ help
+ This builds the "test_firmware" module that creates a userspace
+ interface for testing firmware loading. This can be used to
+ control the triggering of firmware loading without needing an
+ actual firmware-using device. The contents can be rechecked by
+ userspace.
+
+ If unsure, say N.
+
+config TEST_UDELAY
+ tristate "udelay test driver"
+ default n
+ help
+ This builds the "udelay_test" module that helps to make sure
+ that udelay() is working properly.
+
+ If unsure, say N.
+
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
diff --git a/lib/Makefile b/lib/Makefile
index 0cd7b68e1382..d6b4bc496408 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,13 +26,15 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
- percpu-refcount.o percpu_ida.o hash.o
+ percpu-refcount.o percpu_ida.o hash.o rhashtable.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += kstrtox.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_MODULE) += test_module.o
obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
+obj-$(CONFIG_TEST_BPF) += test_bpf.o
+obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -50,6 +52,7 @@ CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_BTREE) += btree.o
+obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
@@ -134,6 +137,8 @@ obj-$(CONFIG_CORDIC) += cordic.o
obj-$(CONFIG_DQL) += dynamic_queue_limits.o
+obj-$(CONFIG_GLOB) += glob.o
+
obj-$(CONFIG_MPILIB) += mpi/
obj-$(CONFIG_SIGNATURE) += digsig.o
@@ -148,7 +153,8 @@ obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
-libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o
+libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
+ fdt_empty_tree.o
$(foreach file, $(libfdt_files), \
$(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt))
lib-$(CONFIG_LIBFDT) += $(libfdt_files)
@@ -156,8 +162,6 @@ lib-$(CONFIG_LIBFDT) += $(libfdt_files)
obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
-interval_tree_test-objs := interval_tree_test_main.o interval_tree.o
-
obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
obj-$(CONFIG_ASN1) += asn1_decoder.o
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 11b9b01fda6b..1a000bb050f9 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -140,7 +140,7 @@ error:
* @decoder: The decoder definition (produced by asn1_compiler)
* @context: The caller's context (to be passed to the action functions)
* @data: The encoded data
- * @datasize: The size of the encoded data
+ * @datalen: The size of the encoded data
*
* Decode BER/DER/CER encoded ASN.1 data according to a bytecode pattern
* produced by asn1_compiler. Action functions are called on marked tags to
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 00bca223d1e1..0211d30d8c39 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -8,6 +8,9 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/kernel.h>
@@ -146,18 +149,18 @@ static __init int test_atomic64(void)
BUG_ON(v.counter != r);
#ifdef CONFIG_X86
- printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n",
+ pr_info("passed for %s platform %s CX8 and %s SSE\n",
#ifdef CONFIG_X86_64
- "x86-64",
+ "x86-64",
#elif defined(CONFIG_X86_CMPXCHG64)
- "i586+",
+ "i586+",
#else
- "i386+",
+ "i386+",
#endif
boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
#else
- printk(KERN_INFO "atomic64 test passed\n");
+ pr_info("passed\n");
#endif
return 0;
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 06f7e4fe8d2d..1e031f2c9aba 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -40,9 +40,9 @@
* for the best explanations of this ordering.
*/
-int __bitmap_empty(const unsigned long *bitmap, int bits)
+int __bitmap_empty(const unsigned long *bitmap, unsigned int bits)
{
- int k, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap[k])
return 0;
@@ -55,9 +55,9 @@ int __bitmap_empty(const unsigned long *bitmap, int bits)
}
EXPORT_SYMBOL(__bitmap_empty);
-int __bitmap_full(const unsigned long *bitmap, int bits)
+int __bitmap_full(const unsigned long *bitmap, unsigned int bits)
{
- int k, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (~bitmap[k])
return 0;
@@ -71,9 +71,9 @@ int __bitmap_full(const unsigned long *bitmap, int bits)
EXPORT_SYMBOL(__bitmap_full);
int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits)
+ const unsigned long *bitmap2, unsigned int bits)
{
- int k, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] != bitmap2[k])
return 0;
@@ -86,14 +86,14 @@ int __bitmap_equal(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_equal);
-void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits)
+void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits)
{
- int k, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
dst[k] = ~src[k];
if (bits % BITS_PER_LONG)
- dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits);
+ dst[k] = ~src[k];
}
EXPORT_SYMBOL(__bitmap_complement);
@@ -182,23 +182,26 @@ void __bitmap_shift_left(unsigned long *dst,
EXPORT_SYMBOL(__bitmap_shift_left);
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits)
+ const unsigned long *bitmap2, unsigned int bits)
{
- int k;
- int nr = BITS_TO_LONGS(bits);
+ unsigned int k;
+ unsigned int lim = bits/BITS_PER_LONG;
unsigned long result = 0;
- for (k = 0; k < nr; k++)
+ for (k = 0; k < lim; k++)
result |= (dst[k] = bitmap1[k] & bitmap2[k]);
+ if (bits % BITS_PER_LONG)
+ result |= (dst[k] = bitmap1[k] & bitmap2[k] &
+ BITMAP_LAST_WORD_MASK(bits));
return result != 0;
}
EXPORT_SYMBOL(__bitmap_and);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits)
+ const unsigned long *bitmap2, unsigned int bits)
{
- int k;
- int nr = BITS_TO_LONGS(bits);
+ unsigned int k;
+ unsigned int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] | bitmap2[k];
@@ -206,10 +209,10 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
EXPORT_SYMBOL(__bitmap_or);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits)
+ const unsigned long *bitmap2, unsigned int bits)
{
- int k;
- int nr = BITS_TO_LONGS(bits);
+ unsigned int k;
+ unsigned int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] ^ bitmap2[k];
@@ -217,22 +220,25 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
EXPORT_SYMBOL(__bitmap_xor);
int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits)
+ const unsigned long *bitmap2, unsigned int bits)
{
- int k;
- int nr = BITS_TO_LONGS(bits);
+ unsigned int k;
+ unsigned int lim = bits/BITS_PER_LONG;
unsigned long result = 0;
- for (k = 0; k < nr; k++)
+ for (k = 0; k < lim; k++)
result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
+ if (bits % BITS_PER_LONG)
+ result |= (dst[k] = bitmap1[k] & ~bitmap2[k] &
+ BITMAP_LAST_WORD_MASK(bits));
return result != 0;
}
EXPORT_SYMBOL(__bitmap_andnot);
int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits)
+ const unsigned long *bitmap2, unsigned int bits)
{
- int k, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & bitmap2[k])
return 1;
@@ -245,9 +251,9 @@ int __bitmap_intersects(const unsigned long *bitmap1,
EXPORT_SYMBOL(__bitmap_intersects);
int __bitmap_subset(const unsigned long *bitmap1,
- const unsigned long *bitmap2, int bits)
+ const unsigned long *bitmap2, unsigned int bits)
{
- int k, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & ~bitmap2[k])
return 0;
@@ -259,9 +265,10 @@ int __bitmap_subset(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_subset);
-int __bitmap_weight(const unsigned long *bitmap, int bits)
+int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
{
- int k, w = 0, lim = bits/BITS_PER_LONG;
+ unsigned int k, lim = bits/BITS_PER_LONG;
+ int w = 0;
for (k = 0; k < lim; k++)
w += hweight_long(bitmap[k]);
@@ -273,42 +280,42 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
}
EXPORT_SYMBOL(__bitmap_weight);
-void bitmap_set(unsigned long *map, int start, int nr)
+void bitmap_set(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
- const int size = start + nr;
+ const unsigned int size = start + len;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
- while (nr - bits_to_set >= 0) {
+ while (len - bits_to_set >= 0) {
*p |= mask_to_set;
- nr -= bits_to_set;
+ len -= bits_to_set;
bits_to_set = BITS_PER_LONG;
mask_to_set = ~0UL;
p++;
}
- if (nr) {
+ if (len) {
mask_to_set &= BITMAP_LAST_WORD_MASK(size);
*p |= mask_to_set;
}
}
EXPORT_SYMBOL(bitmap_set);
-void bitmap_clear(unsigned long *map, int start, int nr)
+void bitmap_clear(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
- const int size = start + nr;
+ const unsigned int size = start + len;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
- while (nr - bits_to_clear >= 0) {
+ while (len - bits_to_clear >= 0) {
*p &= ~mask_to_clear;
- nr -= bits_to_clear;
+ len -= bits_to_clear;
bits_to_clear = BITS_PER_LONG;
mask_to_clear = ~0UL;
p++;
}
- if (nr) {
+ if (len) {
mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
*p &= ~mask_to_clear;
}
@@ -664,13 +671,8 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
{
- char *nl = strchr(bp, '\n');
- int len;
-
- if (nl)
- len = nl - bp;
- else
- len = strlen(bp);
+ char *nl = strchrnul(bp, '\n');
+ int len = nl - bp;
return __bitmap_parselist(bp, len, 0, maskp, nmaskbits);
}
@@ -716,7 +718,7 @@ EXPORT_SYMBOL(bitmap_parselist_user);
*
* If for example, just bits 4 through 7 are set in @buf, then @pos
* values 4 through 7 will get mapped to 0 through 3, respectively,
- * and other @pos values will get mapped to 0. When @pos value 7
+ * and other @pos values will get mapped to -1. When @pos value 7
* gets mapped to (returns) @ord value 3 in this example, that means
* that bit 7 is the 3rd (starting with 0th) set bit in @buf.
*
@@ -1046,7 +1048,7 @@ enum {
REG_OP_RELEASE, /* clear all bits in region */
};
-static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
+static int __reg_op(unsigned long *bitmap, unsigned int pos, int order, int reg_op)
{
int nbits_reg; /* number of bits in region */
int index; /* index first long of region in bitmap */
@@ -1112,11 +1114,11 @@ done:
* Return the bit offset in bitmap of the allocated region,
* or -errno on failure.
*/
-int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
+int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
{
- int pos, end; /* scans bitmap by regions of size order */
+ unsigned int pos, end; /* scans bitmap by regions of size order */
- for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) {
+ for (pos = 0 ; (end = pos + (1U << order)) <= bits; pos = end) {
if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
continue;
__reg_op(bitmap, pos, order, REG_OP_ALLOC);
@@ -1137,7 +1139,7 @@ EXPORT_SYMBOL(bitmap_find_free_region);
*
* No return value.
*/
-void bitmap_release_region(unsigned long *bitmap, int pos, int order)
+void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
{
__reg_op(bitmap, pos, order, REG_OP_RELEASE);
}
@@ -1154,12 +1156,11 @@ EXPORT_SYMBOL(bitmap_release_region);
* Return 0 on success, or %-EBUSY if specified region wasn't
* free (not all bits were zero).
*/
-int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
+int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
{
if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
return -EBUSY;
- __reg_op(bitmap, pos, order, REG_OP_ALLOC);
- return 0;
+ return __reg_op(bitmap, pos, order, REG_OP_ALLOC);
}
EXPORT_SYMBOL(bitmap_allocate_region);
diff --git a/lib/btree.c b/lib/btree.c
index f9a484676cb6..4264871ea1a0 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -198,6 +198,7 @@ EXPORT_SYMBOL_GPL(btree_init);
void btree_destroy(struct btree_head *head)
{
+ mempool_free(head->node, head->mempool);
mempool_destroy(head->mempool);
head->mempool = NULL;
}
diff --git a/lib/bug.c b/lib/bug.c
index 168603477f02..d1d7c7878900 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -37,6 +37,9 @@
Jeremy Fitzhardinge <jeremy@goop.org> 2006
*/
+
+#define pr_fmt(fmt) fmt
+
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -153,15 +156,13 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
if (warning) {
/* this is a WARN_ON rather than BUG/BUG_ON */
- printk(KERN_WARNING "------------[ cut here ]------------\n");
+ pr_warn("------------[ cut here ]------------\n");
if (file)
- printk(KERN_WARNING "WARNING: at %s:%u\n",
- file, line);
+ pr_warn("WARNING: at %s:%u\n", file, line);
else
- printk(KERN_WARNING "WARNING: at %p "
- "[verbose debug info unavailable]\n",
- (void *)bugaddr);
+ pr_warn("WARNING: at %p [verbose debug info unavailable]\n",
+ (void *)bugaddr);
print_modules();
show_regs(regs);
@@ -174,12 +175,10 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
printk(KERN_DEFAULT "------------[ cut here ]------------\n");
if (file)
- printk(KERN_CRIT "kernel BUG at %s:%u!\n",
- file, line);
+ pr_crit("kernel BUG at %s:%u!\n", file, line);
else
- printk(KERN_CRIT "Kernel BUG at %p "
- "[verbose debug info unavailable]\n",
- (void *)bugaddr);
+ pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n",
+ (void *)bugaddr);
return BUG_TRAP_TYPE_BUG;
}
diff --git a/lib/cmdline.c b/lib/cmdline.c
index d4932f745e92..76a712e6e20e 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -121,11 +121,7 @@ EXPORT_SYMBOL(get_options);
* @retptr: (output) Optional pointer to next char after parse completes
*
* Parses a string into a number. The number stored at @ptr is
- * potentially suffixed with %K (for kilobytes, or 1024 bytes),
- * %M (for megabytes, or 1048576 bytes), or %G (for gigabytes, or
- * 1073741824). If the number is suffixed with K, M, or G, then
- * the return value is the number multiplied by one kilobyte, one
- * megabyte, or one gigabyte, respectively.
+ * potentially suffixed with K, M, G, T, P, E.
*/
unsigned long long memparse(const char *ptr, char **retptr)
@@ -135,6 +131,15 @@ unsigned long long memparse(const char *ptr, char **retptr)
unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
switch (*endptr) {
+ case 'E':
+ case 'e':
+ ret <<= 10;
+ case 'P':
+ case 'p':
+ ret <<= 10;
+ case 'T':
+ case 't':
+ ret <<= 10;
case 'G':
case 'g':
ret <<= 10;
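
With the added fall-through cases, each suffix contributes another factor of 1024, so 'K' scales by 2^10, 'M' by 2^20, 'G' by 2^30, 'T' by 2^40, 'P' by 2^50 and 'E' by 2^60. A minimal sketch of the resulting behaviour (the demo function and its values are illustrative, not part of the patch):

#include <linux/kernel.h>

static void __init memparse_demo(void)
{
	char *end;

	/* 'T' falls through 'G', 'M' and 'K', i.e. four shifts of 10 bits. */
	WARN_ON(memparse("1T", &end) != 1ULL << 40);
	WARN_ON(memparse("16M", &end) != 16ULL << 20);
	WARN_ON(memparse("8", &end) != 8);	/* no suffix: plain number */
}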
diff --git a/lib/cpumask.c b/lib/cpumask.c
index b810b753c607..b6513a9f2892 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -164,3 +164,66 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
memblock_free_early(__pa(mask), cpumask_size());
}
#endif
+
+/**
+ * cpumask_set_cpu_local_first - set i'th cpu with local numa cpus first
+ *
+ * @i: index number
+ * @numa_node: local numa_node
+ * @dstp: cpumask with the relevant cpu bit set according to the policy
+ *
+ * This function sets the cpumask according to a numa aware policy.
+ * cpumask could be used as an affinity hint for the IRQ related to a
+ * queue. When the policy is to spread queues across cores - local cores
+ * first.
+ *
+ * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set
+ * the cpu bit and need to re-call the function.
+ */
+int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+{
+ cpumask_var_t mask;
+ int cpu;
+ int ret = 0;
+
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ i %= num_online_cpus();
+
+ if (numa_node == -1 || !cpumask_of_node(numa_node)) {
+ /* Use all online cpus for a non-NUMA-aware system */
+ cpumask_copy(mask, cpu_online_mask);
+ } else {
+ int n;
+
+ cpumask_and(mask,
+ cpumask_of_node(numa_node), cpu_online_mask);
+
+ n = cpumask_weight(mask);
+ if (i >= n) {
+ i -= n;
+
+ /* If index > number of local cpus, mask out local
+ * cpus
+ */
+ cpumask_andnot(mask, cpu_online_mask, mask);
+ }
+ }
+
+ for_each_cpu(cpu, mask) {
+ if (--i < 0)
+ goto out;
+ }
+
+ ret = -EAGAIN;
+
+out:
+ free_cpumask_var(mask);
+
+ if (!ret)
+ cpumask_set_cpu(cpu, dstp);
+
+ return ret;
+}
+EXPORT_SYMBOL(cpumask_set_cpu_local_first);
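
The kerneldoc above describes the intended use: picking one CPU per queue, preferring CPUs on the device's NUMA node. A hedged driver-side sketch follows; the helper and its error convention come from the patch, but the surrounding function is purely illustrative:

#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Pick an affinity-hint CPU for queue 'qid' of a device on 'node'. */
static int pick_queue_cpu(int qid, int node)
{
	cpumask_var_t mask;
	int err, cpu = 0;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* On success exactly one bit is set; local CPUs are used first. */
	err = cpumask_set_cpu_local_first(qid, node, mask);
	if (!err)
		cpu = cpumask_first(mask);

	free_cpumask_var(mask);
	return err ? err : cpu;
}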
diff --git a/lib/crc32.c b/lib/crc32.c
index 70f00ca5ef1e..9a907d489d95 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -33,13 +33,13 @@
#include "crc32defs.h"
#if CRC_LE_BITS > 8
-# define tole(x) ((__force u32) __constant_cpu_to_le32(x))
+# define tole(x) ((__force u32) cpu_to_le32(x))
#else
# define tole(x) (x)
#endif
#if CRC_BE_BITS > 8
-# define tobe(x) ((__force u32) __constant_cpu_to_be32(x))
+# define tobe(x) ((__force u32) cpu_to_be32(x))
#else
# define tobe(x) (x)
#endif
@@ -50,34 +50,10 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Various CRC32 calculations");
MODULE_LICENSE("GPL");
-#define GF2_DIM 32
-
-static u32 gf2_matrix_times(u32 *mat, u32 vec)
-{
- u32 sum = 0;
-
- while (vec) {
- if (vec & 1)
- sum ^= *mat;
- vec >>= 1;
- mat++;
- }
-
- return sum;
-}
-
-static void gf2_matrix_square(u32 *square, u32 *mat)
-{
- int i;
-
- for (i = 0; i < GF2_DIM; i++)
- square[i] = gf2_matrix_times(mat, mat[i]);
-}
-
#if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
/* implements slicing-by-4 or slicing-by-8 algorithm */
-static inline u32
+static inline u32 __pure
crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
{
# ifdef __LITTLE_ENDIAN
@@ -155,51 +131,6 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
}
#endif
-/* For conditions of distribution and use, see copyright notice in zlib.h */
-static u32 crc32_generic_combine(u32 crc1, u32 crc2, size_t len2,
- u32 polynomial)
-{
- u32 even[GF2_DIM]; /* Even-power-of-two zeros operator */
- u32 odd[GF2_DIM]; /* Odd-power-of-two zeros operator */
- u32 row;
- int i;
-
- if (len2 <= 0)
- return crc1;
-
- /* Put operator for one zero bit in odd */
- odd[0] = polynomial;
- row = 1;
- for (i = 1; i < GF2_DIM; i++) {
- odd[i] = row;
- row <<= 1;
- }
-
- gf2_matrix_square(even, odd); /* Put operator for two zero bits in even */
- gf2_matrix_square(odd, even); /* Put operator for four zero bits in odd */
-
- /* Apply len2 zeros to crc1 (first square will put the operator for one
- * zero byte, eight zero bits, in even).
- */
- do {
- /* Apply zeros operator for this bit of len2 */
- gf2_matrix_square(even, odd);
- if (len2 & 1)
- crc1 = gf2_matrix_times(even, crc1);
- len2 >>= 1;
- /* If no more bits set, then done */
- if (len2 == 0)
- break;
- /* Another iteration of the loop with odd and even swapped */
- gf2_matrix_square(odd, even);
- if (len2 & 1)
- crc1 = gf2_matrix_times(odd, crc1);
- len2 >>= 1;
- } while (len2 != 0);
-
- crc1 ^= crc2;
- return crc1;
-}
/**
* crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
@@ -271,19 +202,81 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
(const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
}
#endif
-u32 __pure crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
+EXPORT_SYMBOL(crc32_le);
+EXPORT_SYMBOL(__crc32c_le);
+
+/*
+ * This multiplies the polynomials x and y modulo the given modulus.
+ * This follows the "little-endian" CRC convention that the lsbit
+ * represents the highest power of x, and the msbit represents x^0.
+ */
+static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
{
- return crc32_generic_combine(crc1, crc2, len2, CRCPOLY_LE);
+ u32 product = x & 1 ? y : 0;
+ int i;
+
+ for (i = 0; i < 31; i++) {
+ product = (product >> 1) ^ (product & 1 ? modulus : 0);
+ x >>= 1;
+ product ^= x & 1 ? y : 0;
+ }
+
+ return product;
}
-u32 __pure __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
+/**
+ * crc32_generic_shift - Append len 0 bytes to crc, in logarithmic time
+ * @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient)
+ * @len: The number of bytes. @crc is multiplied by x^(8*@len)
+ * @polynomial: The modulus used to reduce the result to 32 bits.
+ *
+ * It's possible to parallelize CRC computations by computing a CRC
+ * over separate ranges of a buffer, then summing them.
+ * This shifts the given CRC by 8*len bits (i.e. produces the same effect
+ * as appending len bytes of zero to the data), in time proportional
+ * to log(len).
+ */
+static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
+ u32 polynomial)
{
- return crc32_generic_combine(crc1, crc2, len2, CRC32C_POLY_LE);
+ u32 power = polynomial; /* CRC of x^32 */
+ int i;
+
+ /* Shift up to 32 bits in the simple linear way */
+ for (i = 0; i < 8 * (int)(len & 3); i++)
+ crc = (crc >> 1) ^ (crc & 1 ? polynomial : 0);
+
+ len >>= 2;
+ if (!len)
+ return crc;
+
+ for (;;) {
+ /* "power" is x^(2^i), modulo the polynomial */
+ if (len & 1)
+ crc = gf2_multiply(crc, power, polynomial);
+
+ len >>= 1;
+ if (!len)
+ break;
+
+ /* Square power, advancing to x^(2^(i+1)) */
+ power = gf2_multiply(power, power, polynomial);
+ }
+
+ return crc;
}
-EXPORT_SYMBOL(crc32_le);
-EXPORT_SYMBOL(crc32_le_combine);
-EXPORT_SYMBOL(__crc32c_le);
-EXPORT_SYMBOL(__crc32c_le_combine);
+
+u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
+{
+ return crc32_generic_shift(crc, len, CRCPOLY_LE);
+}
+
+u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
+{
+ return crc32_generic_shift(crc, len, CRC32C_POLY_LE);
+}
+EXPORT_SYMBOL(crc32_le_shift);
+EXPORT_SYMBOL(__crc32c_le_shift);
/**
* crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
@@ -351,7 +344,7 @@ EXPORT_SYMBOL(crc32_be);
#ifdef CONFIG_CRC32_SELFTEST
/* 4096 random bytes */
-static u8 __attribute__((__aligned__(8))) test_buf[] =
+static u8 const __aligned(8) test_buf[] __initconst =
{
0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
@@ -875,7 +868,7 @@ static struct crc_test {
u32 crc_le; /* expected crc32_le result */
u32 crc_be; /* expected crc32_be result */
u32 crc32c_le; /* expected crc32c_le result */
-} test[] =
+} const test[] __initconst =
{
{0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
{0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
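
The new shift primitive replaces the old matrix-based combine code: shifting crc(A) by len(B) bytes and xoring in crc(B) yields the CRC of the concatenated buffer, which is the crc32_le_combine() semantics. A minimal sketch of that identity, assuming the second partial CRC is seeded with 0 (the wrapper function is illustrative, not an excerpt from the patch):

#include <linux/crc32.h>

/*
 * CRC of the concatenation A||B from two partial CRCs that may have
 * been computed in parallel. crc_a may use any seed; the CRC of B is
 * deliberately seeded with 0 so the identity holds.
 */
static u32 crc32_le_cat(u32 crc_a, const u8 *b, size_t len_b)
{
	return crc32_le_shift(crc_a, len_b) ^ crc32_le(0, b, len_b);
}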
diff --git a/lib/crc7.c b/lib/crc7.c
index f1c3a144cec1..bf6255e23919 100644
--- a/lib/crc7.c
+++ b/lib/crc7.c
@@ -10,42 +10,47 @@
#include <linux/crc7.h>
-/* Table for CRC-7 (polynomial x^7 + x^3 + 1) */
-const u8 crc7_syndrome_table[256] = {
- 0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f,
- 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77,
- 0x19, 0x10, 0x0b, 0x02, 0x3d, 0x34, 0x2f, 0x26,
- 0x51, 0x58, 0x43, 0x4a, 0x75, 0x7c, 0x67, 0x6e,
- 0x32, 0x3b, 0x20, 0x29, 0x16, 0x1f, 0x04, 0x0d,
- 0x7a, 0x73, 0x68, 0x61, 0x5e, 0x57, 0x4c, 0x45,
- 0x2b, 0x22, 0x39, 0x30, 0x0f, 0x06, 0x1d, 0x14,
- 0x63, 0x6a, 0x71, 0x78, 0x47, 0x4e, 0x55, 0x5c,
- 0x64, 0x6d, 0x76, 0x7f, 0x40, 0x49, 0x52, 0x5b,
- 0x2c, 0x25, 0x3e, 0x37, 0x08, 0x01, 0x1a, 0x13,
- 0x7d, 0x74, 0x6f, 0x66, 0x59, 0x50, 0x4b, 0x42,
- 0x35, 0x3c, 0x27, 0x2e, 0x11, 0x18, 0x03, 0x0a,
- 0x56, 0x5f, 0x44, 0x4d, 0x72, 0x7b, 0x60, 0x69,
- 0x1e, 0x17, 0x0c, 0x05, 0x3a, 0x33, 0x28, 0x21,
- 0x4f, 0x46, 0x5d, 0x54, 0x6b, 0x62, 0x79, 0x70,
- 0x07, 0x0e, 0x15, 0x1c, 0x23, 0x2a, 0x31, 0x38,
- 0x41, 0x48, 0x53, 0x5a, 0x65, 0x6c, 0x77, 0x7e,
- 0x09, 0x00, 0x1b, 0x12, 0x2d, 0x24, 0x3f, 0x36,
- 0x58, 0x51, 0x4a, 0x43, 0x7c, 0x75, 0x6e, 0x67,
- 0x10, 0x19, 0x02, 0x0b, 0x34, 0x3d, 0x26, 0x2f,
- 0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c,
- 0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04,
- 0x6a, 0x63, 0x78, 0x71, 0x4e, 0x47, 0x5c, 0x55,
- 0x22, 0x2b, 0x30, 0x39, 0x06, 0x0f, 0x14, 0x1d,
- 0x25, 0x2c, 0x37, 0x3e, 0x01, 0x08, 0x13, 0x1a,
- 0x6d, 0x64, 0x7f, 0x76, 0x49, 0x40, 0x5b, 0x52,
- 0x3c, 0x35, 0x2e, 0x27, 0x18, 0x11, 0x0a, 0x03,
- 0x74, 0x7d, 0x66, 0x6f, 0x50, 0x59, 0x42, 0x4b,
- 0x17, 0x1e, 0x05, 0x0c, 0x33, 0x3a, 0x21, 0x28,
- 0x5f, 0x56, 0x4d, 0x44, 0x7b, 0x72, 0x69, 0x60,
- 0x0e, 0x07, 0x1c, 0x15, 0x2a, 0x23, 0x38, 0x31,
- 0x46, 0x4f, 0x54, 0x5d, 0x62, 0x6b, 0x70, 0x79
+/*
+ * Table for CRC-7 (polynomial x^7 + x^3 + 1).
+ * This is a big-endian CRC (msbit is highest power of x),
+ * aligned so the msbit of the byte is the x^6 coefficient
+ * and the lsbit is not used.
+ */
+const u8 crc7_be_syndrome_table[256] = {
+ 0x00, 0x12, 0x24, 0x36, 0x48, 0x5a, 0x6c, 0x7e,
+ 0x90, 0x82, 0xb4, 0xa6, 0xd8, 0xca, 0xfc, 0xee,
+ 0x32, 0x20, 0x16, 0x04, 0x7a, 0x68, 0x5e, 0x4c,
+ 0xa2, 0xb0, 0x86, 0x94, 0xea, 0xf8, 0xce, 0xdc,
+ 0x64, 0x76, 0x40, 0x52, 0x2c, 0x3e, 0x08, 0x1a,
+ 0xf4, 0xe6, 0xd0, 0xc2, 0xbc, 0xae, 0x98, 0x8a,
+ 0x56, 0x44, 0x72, 0x60, 0x1e, 0x0c, 0x3a, 0x28,
+ 0xc6, 0xd4, 0xe2, 0xf0, 0x8e, 0x9c, 0xaa, 0xb8,
+ 0xc8, 0xda, 0xec, 0xfe, 0x80, 0x92, 0xa4, 0xb6,
+ 0x58, 0x4a, 0x7c, 0x6e, 0x10, 0x02, 0x34, 0x26,
+ 0xfa, 0xe8, 0xde, 0xcc, 0xb2, 0xa0, 0x96, 0x84,
+ 0x6a, 0x78, 0x4e, 0x5c, 0x22, 0x30, 0x06, 0x14,
+ 0xac, 0xbe, 0x88, 0x9a, 0xe4, 0xf6, 0xc0, 0xd2,
+ 0x3c, 0x2e, 0x18, 0x0a, 0x74, 0x66, 0x50, 0x42,
+ 0x9e, 0x8c, 0xba, 0xa8, 0xd6, 0xc4, 0xf2, 0xe0,
+ 0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62, 0x70,
+ 0x82, 0x90, 0xa6, 0xb4, 0xca, 0xd8, 0xee, 0xfc,
+ 0x12, 0x00, 0x36, 0x24, 0x5a, 0x48, 0x7e, 0x6c,
+ 0xb0, 0xa2, 0x94, 0x86, 0xf8, 0xea, 0xdc, 0xce,
+ 0x20, 0x32, 0x04, 0x16, 0x68, 0x7a, 0x4c, 0x5e,
+ 0xe6, 0xf4, 0xc2, 0xd0, 0xae, 0xbc, 0x8a, 0x98,
+ 0x76, 0x64, 0x52, 0x40, 0x3e, 0x2c, 0x1a, 0x08,
+ 0xd4, 0xc6, 0xf0, 0xe2, 0x9c, 0x8e, 0xb8, 0xaa,
+ 0x44, 0x56, 0x60, 0x72, 0x0c, 0x1e, 0x28, 0x3a,
+ 0x4a, 0x58, 0x6e, 0x7c, 0x02, 0x10, 0x26, 0x34,
+ 0xda, 0xc8, 0xfe, 0xec, 0x92, 0x80, 0xb6, 0xa4,
+ 0x78, 0x6a, 0x5c, 0x4e, 0x30, 0x22, 0x14, 0x06,
+ 0xe8, 0xfa, 0xcc, 0xde, 0xa0, 0xb2, 0x84, 0x96,
+ 0x2e, 0x3c, 0x0a, 0x18, 0x66, 0x74, 0x42, 0x50,
+ 0xbe, 0xac, 0x9a, 0x88, 0xf6, 0xe4, 0xd2, 0xc0,
+ 0x1c, 0x0e, 0x38, 0x2a, 0x54, 0x46, 0x70, 0x62,
+ 0x8c, 0x9e, 0xa8, 0xba, 0xc4, 0xd6, 0xe0, 0xf2
};
-EXPORT_SYMBOL(crc7_syndrome_table);
+EXPORT_SYMBOL(crc7_be_syndrome_table);
/**
* crc7 - update the CRC7 for the data buffer
@@ -55,14 +60,17 @@ EXPORT_SYMBOL(crc7_syndrome_table);
* Context: any
*
* Returns the updated CRC7 value.
+ * The CRC7 is left-aligned in the byte (the lsbit is always 0), as that
+ * makes the computation easier, and all callers want it in that form.
+ *
*/
-u8 crc7(u8 crc, const u8 *buffer, size_t len)
+u8 crc7_be(u8 crc, const u8 *buffer, size_t len)
{
while (len--)
- crc = crc7_byte(crc, *buffer++);
+ crc = crc7_be_byte(crc, *buffer++);
return crc;
}
-EXPORT_SYMBOL(crc7);
+EXPORT_SYMBOL(crc7_be);
MODULE_DESCRIPTION("CRC7 calculations");
MODULE_LICENSE("GPL");
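
Because the crc7_be() result is left-aligned with the lsbit clear, a typical caller only needs to OR in a trailing end bit rather than shifting the old right-aligned value. A hedged sketch of such a caller (an SD/MMC-style command framer; the function name and framing are illustrative, only crc7_be() itself comes from the patch):

#include <linux/crc7.h>

/* CRC byte for a 5-byte SD/MMC command: 7-bit CRC plus end bit. */
static u8 mmc_cmd_crc_byte(const u8 *cmd5)
{
	return crc7_be(0, cmd5, 5) | 0x01;
}

With the previous right-aligned API, the same byte typically had to be built as (crc7(0, cmd5, 5) << 1) | 0x01.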
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index e0731c3db706..547f7f923dbc 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -7,6 +7,9 @@
*
* For licencing details see kernel-base/COPYING
*/
+
+#define pr_fmt(fmt) "ODEBUG: " fmt
+
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
@@ -218,7 +221,7 @@ static void debug_objects_oom(void)
unsigned long flags;
int i;
- printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
+ pr_warn("Out of memory. ODEBUG disabled\n");
for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
raw_spin_lock_irqsave(&db->lock, flags);
@@ -292,11 +295,9 @@ static void debug_object_is_on_stack(void *addr, int onstack)
limit++;
if (is_on_stack)
- printk(KERN_WARNING
- "ODEBUG: object is on stack, but not annotated\n");
+ pr_warn("object is on stack, but not annotated\n");
else
- printk(KERN_WARNING
- "ODEBUG: object is not on stack, but annotated\n");
+ pr_warn("object is not on stack, but annotated\n");
WARN_ON(1);
}
@@ -985,7 +986,7 @@ static void __init debug_objects_selftest(void)
if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
goto out;
#endif
- printk(KERN_INFO "ODEBUG: selftest passed\n");
+ pr_info("selftest passed\n");
out:
debug_objects_fixups = oldfixups;
@@ -1060,8 +1061,8 @@ static int __init debug_objects_replace_static_objects(void)
}
local_irq_enable();
- printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
- obj_pool_used);
+ pr_debug("%d of %d active objects replaced\n",
+ cnt, obj_pool_used);
return 0;
free:
hlist_for_each_entry_safe(obj, tmp, &objects, node) {
@@ -1090,7 +1091,7 @@ void __init debug_objects_mem_init(void)
debug_objects_enabled = 0;
if (obj_cache)
kmem_cache_destroy(obj_cache);
- printk(KERN_WARNING "ODEBUG: out of memory.\n");
+ pr_warn("out of memory.\n");
} else
debug_objects_selftest();
}
diff --git a/lib/decompress.c b/lib/decompress.c
index 86069d74c062..37f3c786348f 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -54,7 +54,7 @@ static const struct compress_format compressed_formats[] __initconst = {
{ {0, 0}, NULL, NULL }
};
-decompress_fn __init decompress_method(const unsigned char *inbuf, int len,
+decompress_fn __init decompress_method(const unsigned char *inbuf, long len,
const char **name)
{
const struct compress_format *cf;
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 31c5f7675fbf..8290e0bef7ea 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -92,8 +92,8 @@ struct bunzip_data {
/* State for interrupting output loop */
int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent;
/* I/O tracking data (file handles, buffers, positions, etc.) */
- int (*fill)(void*, unsigned int);
- int inbufCount, inbufPos /*, outbufPos*/;
+ long (*fill)(void*, unsigned long);
+ long inbufCount, inbufPos /*, outbufPos*/;
unsigned char *inbuf /*,*outbuf*/;
unsigned int inbufBitCount, inbufBits;
/* The CRC values stored in the block header and calculated from the
@@ -617,7 +617,7 @@ decode_next_byte:
goto decode_next_byte;
}
-static int INIT nofill(void *buf, unsigned int len)
+static long INIT nofill(void *buf, unsigned long len)
{
return -1;
}
@@ -625,8 +625,8 @@ static int INIT nofill(void *buf, unsigned int len)
/* Allocate the structure, read file header. If in_fd ==-1, inbuf must contain
a complete bunzip file (len bytes long). If in_fd!=-1, inbuf and len are
ignored, and data is read from file handle into temporary buffer. */
-static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
- int (*fill)(void*, unsigned int))
+static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
+ long (*fill)(void*, unsigned long))
{
struct bunzip_data *bd;
unsigned int i, j, c;
@@ -675,11 +675,11 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
/* Example usage: decompress src_fd to dst_fd. (Stops at end of bzip2 data,
not end of file.) */
-STATIC int INIT bunzip2(unsigned char *buf, int len,
- int(*fill)(void*, unsigned int),
- int(*flush)(void*, unsigned int),
+STATIC int INIT bunzip2(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
unsigned char *outbuf,
- int *pos,
+ long *pos,
void(*error)(char *x))
{
struct bunzip_data *bd;
@@ -743,11 +743,11 @@ exit_0:
}
#ifdef PREBOOT
-STATIC int INIT decompress(unsigned char *buf, int len,
- int(*fill)(void*, unsigned int),
- int(*flush)(void*, unsigned int),
+STATIC int INIT decompress(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
unsigned char *outbuf,
- int *pos,
+ long *pos,
void(*error)(char *x))
{
return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error);
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index 0edfd742a154..d4c7891635ec 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -27,17 +27,17 @@
#define GZIP_IOBUF_SIZE (16*1024)
-static int INIT nofill(void *buffer, unsigned int len)
+static long INIT nofill(void *buffer, unsigned long len)
{
return -1;
}
/* Included from initramfs et al code */
-STATIC int INIT gunzip(unsigned char *buf, int len,
- int(*fill)(void*, unsigned int),
- int(*flush)(void*, unsigned int),
+STATIC int INIT gunzip(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
unsigned char *out_buf,
- int *pos,
+ long *pos,
void(*error)(char *x)) {
u8 *zbuf;
struct z_stream_s *strm;
@@ -142,7 +142,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
/* Write any data generated */
if (flush && strm->next_out > out_buf) {
- int l = strm->next_out - out_buf;
+ long l = strm->next_out - out_buf;
if (l != flush(out_buf, l)) {
rc = -1;
error("write error");
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index 7d1e83caf8ad..40f66ebe57b7 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -31,10 +31,10 @@
#define LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE (8 << 20)
#define ARCHIVE_MAGICNUMBER 0x184C2102
-STATIC inline int INIT unlz4(u8 *input, int in_len,
- int (*fill) (void *, unsigned int),
- int (*flush) (void *, unsigned int),
- u8 *output, int *posp,
+STATIC inline int INIT unlz4(u8 *input, long in_len,
+ long (*fill)(void *, unsigned long),
+ long (*flush)(void *, unsigned long),
+ u8 *output, long *posp,
void (*error) (char *x))
{
int ret = -1;
@@ -43,7 +43,7 @@ STATIC inline int INIT unlz4(u8 *input, int in_len,
u8 *inp;
u8 *inp_start;
u8 *outp;
- int size = in_len;
+ long size = in_len;
#ifdef PREBOOT
size_t out_len = get_unaligned_le32(input + in_len);
#endif
@@ -83,13 +83,20 @@ STATIC inline int INIT unlz4(u8 *input, int in_len,
if (posp)
*posp = 0;
- if (fill)
- fill(inp, 4);
+ if (fill) {
+ size = fill(inp, 4);
+ if (size < 4) {
+ error("data corrupted");
+ goto exit_2;
+ }
+ }
chunksize = get_unaligned_le32(inp);
if (chunksize == ARCHIVE_MAGICNUMBER) {
- inp += 4;
- size -= 4;
+ if (!fill) {
+ inp += 4;
+ size -= 4;
+ }
} else {
error("invalid header");
goto exit_2;
@@ -100,29 +107,44 @@ STATIC inline int INIT unlz4(u8 *input, int in_len,
for (;;) {
- if (fill)
- fill(inp, 4);
+ if (fill) {
+ size = fill(inp, 4);
+ if (size == 0)
+ break;
+ if (size < 4) {
+ error("data corrupted");
+ goto exit_2;
+ }
+ }
chunksize = get_unaligned_le32(inp);
if (chunksize == ARCHIVE_MAGICNUMBER) {
- inp += 4;
- size -= 4;
+ if (!fill) {
+ inp += 4;
+ size -= 4;
+ }
if (posp)
*posp += 4;
continue;
}
- inp += 4;
- size -= 4;
+
if (posp)
*posp += 4;
- if (fill) {
+ if (!fill) {
+ inp += 4;
+ size -= 4;
+ } else {
if (chunksize > lz4_compressbound(uncomp_chunksize)) {
error("chunk length is longer than allocated");
goto exit_2;
}
- fill(inp, chunksize);
+ size = fill(inp, chunksize);
+ if (size < chunksize) {
+ error("data corrupted");
+ goto exit_2;
+ }
}
#ifdef PREBOOT
if (out_len >= uncomp_chunksize) {
@@ -149,18 +171,17 @@ STATIC inline int INIT unlz4(u8 *input, int in_len,
if (posp)
*posp += chunksize;
- size -= chunksize;
+ if (!fill) {
+ size -= chunksize;
- if (size == 0)
- break;
- else if (size < 0) {
- error("data corrupted");
- goto exit_2;
+ if (size == 0)
+ break;
+ else if (size < 0) {
+ error("data corrupted");
+ goto exit_2;
+ }
+ inp += chunksize;
}
-
- inp += chunksize;
- if (fill)
- inp = inp_start;
}
ret = 0;
@@ -175,11 +196,11 @@ exit_0:
}
#ifdef PREBOOT
-STATIC int INIT decompress(unsigned char *buf, int in_len,
- int(*fill)(void*, unsigned int),
- int(*flush)(void*, unsigned int),
+STATIC int INIT decompress(unsigned char *buf, long in_len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
unsigned char *output,
- int *posp,
+ long *posp,
void(*error)(char *x)
)
{
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 32adb73a9038..0be83af62b88 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -65,11 +65,11 @@ static long long INIT read_int(unsigned char *ptr, int size)
#define LZMA_IOBUF_SIZE 0x10000
struct rc {
- int (*fill)(void*, unsigned int);
+ long (*fill)(void*, unsigned long);
uint8_t *ptr;
uint8_t *buffer;
uint8_t *buffer_end;
- int buffer_size;
+ long buffer_size;
uint32_t code;
uint32_t range;
uint32_t bound;
@@ -82,7 +82,7 @@ struct rc {
#define RC_MODEL_TOTAL_BITS 11
-static int INIT nofill(void *buffer, unsigned int len)
+static long INIT nofill(void *buffer, unsigned long len)
{
return -1;
}
@@ -99,8 +99,8 @@ static void INIT rc_read(struct rc *rc)
/* Called once */
static inline void INIT rc_init(struct rc *rc,
- int (*fill)(void*, unsigned int),
- char *buffer, int buffer_size)
+ long (*fill)(void*, unsigned long),
+ char *buffer, long buffer_size)
{
if (fill)
rc->fill = fill;
@@ -280,7 +280,7 @@ struct writer {
size_t buffer_pos;
int bufsize;
size_t global_pos;
- int(*flush)(void*, unsigned int);
+ long (*flush)(void*, unsigned long);
struct lzma_header *header;
};
@@ -534,11 +534,11 @@ static inline int INIT process_bit1(struct writer *wr, struct rc *rc,
-STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
- int(*fill)(void*, unsigned int),
- int(*flush)(void*, unsigned int),
+STATIC inline int INIT unlzma(unsigned char *buf, long in_len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
unsigned char *output,
- int *posp,
+ long *posp,
void(*error)(char *x)
)
{
@@ -667,11 +667,11 @@ exit_0:
}
#ifdef PREBOOT
-STATIC int INIT decompress(unsigned char *buf, int in_len,
- int(*fill)(void*, unsigned int),
- int(*flush)(void*, unsigned int),
+STATIC int INIT decompress(unsigned char *buf, long in_len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
unsigned char *output,
- int *posp,
+ long *posp,
void(*error)(char *x)
)
{
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index 960183d4258f..b94a31bdd87d 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -51,7 +51,7 @@ static const unsigned char lzop_magic[] = {
#define HEADER_SIZE_MIN (9 + 7 + 4 + 8 + 1 + 4)
#define HEADER_SIZE_MAX (9 + 7 + 1 + 8 + 8 + 4 + 1 + 255 + 4)
-STATIC inline int INIT parse_header(u8 *input, int *skip, int in_len)
+STATIC inline long INIT parse_header(u8 *input, long *skip, long in_len)
{
int l;
u8 *parse = input;
@@ -108,14 +108,14 @@ STATIC inline int INIT parse_header(u8 *input, int *skip, int in_len)
return 1;
}
-STATIC inline int INIT unlzo(u8 *input, int in_len,
- int (*fill) (void *, unsigned int),
- int (*flush) (void *, unsigned int),
- u8 *output, int *posp,
+STATIC int INIT unlzo(u8 *input, long in_len,
+ long (*fill)(void *, unsigned long),
+ long (*flush)(void *, unsigned long),
+ u8 *output, long *posp,
void (*error) (char *x))
{
u8 r = 0;
- int skip = 0;
+ long skip = 0;
u32 src_len, dst_len;
size_t tmp;
u8 *in_buf, *in_buf_save, *out_buf;
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
index 9f34eb56854d..b07a78340e9d 100644
--- a/lib/decompress_unxz.c
+++ b/lib/decompress_unxz.c
@@ -248,10 +248,10 @@ void *memmove(void *dest, const void *src, size_t size)
* both input and output buffers are available as a single chunk, i.e. when
* fill() and flush() won't be used.
*/
-STATIC int INIT unxz(unsigned char *in, int in_size,
- int (*fill)(void *dest, unsigned int size),
- int (*flush)(void *src, unsigned int size),
- unsigned char *out, int *in_used,
+STATIC int INIT unxz(unsigned char *in, long in_size,
+ long (*fill)(void *dest, unsigned long size),
+ long (*flush)(void *src, unsigned long size),
+ unsigned char *out, long *in_used,
void (*error)(char *x))
{
struct xz_buf b;
@@ -329,7 +329,7 @@ STATIC int INIT unxz(unsigned char *in, int in_size,
* returned by xz_dec_run(), but probably
* it's not too bad.
*/
- if (flush(b.out, b.out_pos) != (int)b.out_pos)
+ if (flush(b.out, b.out_pos) != (long)b.out_pos)
ret = XZ_BUF_ERROR;
b.out_pos = 0;
diff --git a/lib/devres.c b/lib/devres.c
index 2f16c133fd36..f4a195a6efe4 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -86,8 +86,6 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
}
EXPORT_SYMBOL(devm_iounmap);
-#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
-
/**
* devm_ioremap_resource() - check, request region, and ioremap resource
* @dev: generic device to handle the resource for
@@ -142,34 +140,6 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
}
EXPORT_SYMBOL(devm_ioremap_resource);
-/**
- * devm_request_and_ioremap() - Check, request region, and ioremap resource
- * @dev: Generic device to handle the resource for
- * @res: resource to be handled
- *
- * Takes all necessary steps to ioremap a mem resource. Uses managed device, so
- * everything is undone on driver detach. Checks arguments, so you can feed
- * it the result from e.g. platform_get_resource() directly. Returns the
- * remapped pointer or NULL on error. Usage example:
- *
- * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- * base = devm_request_and_ioremap(&pdev->dev, res);
- * if (!base)
- * return -EADDRNOTAVAIL;
- */
-void __iomem *devm_request_and_ioremap(struct device *device,
- struct resource *res)
-{
- void __iomem *dest_ptr;
-
- dest_ptr = devm_ioremap_resource(device, res);
- if (IS_ERR(dest_ptr))
- return NULL;
-
- return dest_ptr;
-}
-EXPORT_SYMBOL(devm_request_and_ioremap);
-
#ifdef CONFIG_HAS_IOPORT_MAP
/*
* Generic iomap devres
@@ -194,7 +164,7 @@ static int devm_ioport_map_match(struct device *dev, void *res,
* Managed ioport_map(). Map is automatically unmapped on driver
* detach.
*/
-void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
+void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
unsigned int nr)
{
void __iomem **ptr, *addr;
@@ -265,7 +235,7 @@ static void pcim_iomap_release(struct device *gendev, void *res)
* be safely called without context and guaranteed to succed once
* allocated.
*/
-void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
+void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
struct pcim_iomap_devres *dr, *new_dr;
@@ -290,7 +260,7 @@ EXPORT_SYMBOL(pcim_iomap_table);
* Managed pci_iomap(). Map is automatically unmapped on driver
* detach.
*/
-void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
void __iomem **tbl;
diff --git a/lib/digsig.c b/lib/digsig.c
index 8793aeda30ca..ae05ea393fc8 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -175,10 +175,11 @@ err1:
* digsig_verify() - digital signature verification with public key
* @keyring: keyring to search key in
* @sig: digital signature
- * @sigen: length of the signature
+ * @siglen: length of the signature
* @data: data
* @datalen: length of the data
- * @return: 0 on success, -EINVAL otherwise
+ *
+ * Returns 0 on success, -EINVAL otherwise
*
* Verifies data integrity against digital signature.
* Currently only RSA is supported.
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 7288e38e1757..c9afbe2c445a 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -614,13 +614,15 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor,
char buf[PREFIX_SIZE];
res = dev_printk_emit(7, dev->dev.parent,
- "%s%s %s %s: %pV",
+ "%s%s %s %s%s: %pV",
dynamic_emit_prefix(descriptor, buf),
dev_driver_string(dev->dev.parent),
dev_name(dev->dev.parent),
- netdev_name(dev), &vaf);
+ netdev_name(dev), netdev_reg_state(dev),
+ &vaf);
} else if (dev) {
- res = printk(KERN_DEBUG "%s: %pV", netdev_name(dev), &vaf);
+ res = printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
+ netdev_reg_state(dev), &vaf);
} else {
res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
}
diff --git a/lib/fdt_empty_tree.c b/lib/fdt_empty_tree.c
new file mode 100644
index 000000000000..5d30c58150ad
--- /dev/null
+++ b/lib/fdt_empty_tree.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_empty_tree.c"
diff --git a/lib/glob.c b/lib/glob.c
new file mode 100644
index 000000000000..500fc80d23e1
--- /dev/null
+++ b/lib/glob.c
@@ -0,0 +1,287 @@
+#include <linux/module.h>
+#include <linux/glob.h>
+
+/*
+ * The only reason this code can be compiled as a module is because the
+ * ATA code that depends on it can be as well. In practice, they're
+ * both usually compiled in and the module overhead goes away.
+ */
+MODULE_DESCRIPTION("glob(7) matching");
+MODULE_LICENSE("Dual MIT/GPL");
+
+/**
+ * glob_match - Shell-style pattern matching, like !fnmatch(pat, str, 0)
+ * @pat: Shell-style pattern to match, e.g. "*.[ch]".
+ * @str: String to match. The pattern must match the entire string.
+ *
+ * Perform shell-style glob matching, returning true (1) if the match
+ * succeeds, or false (0) if it fails. Equivalent to !fnmatch(@pat, @str, 0).
+ *
+ * Pattern metacharacters are ?, *, [ and \.
+ * (And, inside character classes, !, - and ].)
+ *
+ * This is a small and simple implementation intended for device blacklists
+ * where a string is matched against a number of patterns. Thus, it
+ * does not preprocess the patterns. It is non-recursive, and run-time
+ * is at most quadratic: strlen(@str)*strlen(@pat).
+ *
+ * An example of the worst case is glob_match("*aaaaa", "aaaaaaaaaa");
+ * it takes 6 passes over the pattern before matching the string.
+ *
+ * Like !fnmatch(@pat, @str, 0) and unlike the shell, this does NOT
+ * treat / or leading . specially; it isn't actually used for pathnames.
+ *
+ * Note that according to glob(7) (and unlike bash), character classes
+ * are complemented by a leading !; this does not support the regex-style
+ * [^a-z] syntax.
+ *
+ * An opening bracket without a matching close is matched literally.
+ */
+bool __pure glob_match(char const *pat, char const *str)
+{
+ /*
+ * Backtrack to previous * on mismatch and retry starting one
+ * character later in the string. Because * matches all characters
+ * (no exception for /), it can be easily proved that there's
+ * never a need to backtrack multiple levels.
+ */
+ char const *back_pat = NULL, *back_str = back_str;
+
+ /*
+ * Loop over each token (character or class) in pat, matching
+ * it against the remaining unmatched tail of str. Return false
+ * on mismatch, or true after matching the trailing nul bytes.
+ */
+ for (;;) {
+ unsigned char c = *str++;
+ unsigned char d = *pat++;
+
+ switch (d) {
+ case '?': /* Wildcard: anything but nul */
+ if (c == '\0')
+ return false;
+ break;
+ case '*': /* Any-length wildcard */
+ if (*pat == '\0') /* Optimize trailing * case */
+ return true;
+ back_pat = pat;
+ back_str = --str; /* Allow zero-length match */
+ break;
+ case '[': { /* Character class */
+ bool match = false, inverted = (*pat == '!');
+ char const *class = pat + inverted;
+ unsigned char a = *class++;
+
+ /*
+ * Iterate over each span in the character class.
+ * A span is either a single character a, or a
+ * range a-b. The first span may begin with ']'.
+ */
+ do {
+ unsigned char b = a;
+
+ if (a == '\0') /* Malformed */
+ goto literal;
+
+ if (class[0] == '-' && class[1] != ']') {
+ b = class[1];
+
+ if (b == '\0')
+ goto literal;
+
+ class += 2;
+ /* Any special action if a > b? */
+ }
+ match |= (a <= c && c <= b);
+ } while ((a = *class++) != ']');
+
+ if (match == inverted)
+ goto backtrack;
+ pat = class;
+ }
+ break;
+ case '\\':
+ d = *pat++;
+ /*FALLTHROUGH*/
+ default: /* Literal character */
+literal:
+ if (c == d) {
+ if (d == '\0')
+ return true;
+ break;
+ }
+backtrack:
+ if (c == '\0' || !back_pat)
+ return false; /* No point continuing */
+ /* Try again from last *, one character later in str. */
+ pat = back_pat;
+ str = ++back_str;
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(glob_match);
+
+
+#ifdef CONFIG_GLOB_SELFTEST
+
+#include <linux/printk.h>
+#include <linux/moduleparam.h>
+
+/* Boot with "glob.verbose=1" to show successful tests, too */
+static bool verbose = false;
+module_param(verbose, bool, 0);
+
+struct glob_test {
+ char const *pat, *str;
+ bool expected;
+};
+
+static bool __pure __init test(char const *pat, char const *str, bool expected)
+{
+ bool match = glob_match(pat, str);
+ bool success = match == expected;
+
+ /* Can't get string literals into a particular section, so... */
+ static char const msg_error[] __initconst =
+ KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n";
+ static char const msg_ok[] __initconst =
+ KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n";
+ static char const mismatch[] __initconst = "mismatch";
+ char const *message;
+
+ if (!success)
+ message = msg_error;
+ else if (verbose)
+ message = msg_ok;
+ else
+ return success;
+
+ printk(message, pat, str, mismatch + 3*match);
+ return success;
+}
+
+/*
+ * The tests are all jammed together in one array to make it simpler
+ * to place that array in the .init.rodata section. The obvious
+ * "array of structures containing char *" has no way to force the
+ * pointed-to strings to be in a particular section.
+ *
+ * Anyway, a test consists of:
+ * 1. Expected glob_match result: '1' or '0'.
+ * 2. Pattern to match: null-terminated string
+ * 3. String to match against: null-terminated string
+ *
+ * The list of tests is terminated with a final '\0' instead of
+ * a glob_match result character.
+ */
+static char const glob_tests[] __initconst =
+ /* Some basic tests */
+ "1" "a\0" "a\0"
+ "0" "a\0" "b\0"
+ "0" "a\0" "aa\0"
+ "0" "a\0" "\0"
+ "1" "\0" "\0"
+ "0" "\0" "a\0"
+ /* Simple character class tests */
+ "1" "[a]\0" "a\0"
+ "0" "[a]\0" "b\0"
+ "0" "[!a]\0" "a\0"
+ "1" "[!a]\0" "b\0"
+ "1" "[ab]\0" "a\0"
+ "1" "[ab]\0" "b\0"
+ "0" "[ab]\0" "c\0"
+ "1" "[!ab]\0" "c\0"
+ "1" "[a-c]\0" "b\0"
+ "0" "[a-c]\0" "d\0"
+ /* Corner cases in character class parsing */
+ "1" "[a-c-e-g]\0" "-\0"
+ "0" "[a-c-e-g]\0" "d\0"
+ "1" "[a-c-e-g]\0" "f\0"
+ "1" "[]a-ceg-ik[]\0" "a\0"
+ "1" "[]a-ceg-ik[]\0" "]\0"
+ "1" "[]a-ceg-ik[]\0" "[\0"
+ "1" "[]a-ceg-ik[]\0" "h\0"
+ "0" "[]a-ceg-ik[]\0" "f\0"
+ "0" "[!]a-ceg-ik[]\0" "h\0"
+ "0" "[!]a-ceg-ik[]\0" "]\0"
+ "1" "[!]a-ceg-ik[]\0" "f\0"
+ /* Simple wild cards */
+ "1" "?\0" "a\0"
+ "0" "?\0" "aa\0"
+ "0" "??\0" "a\0"
+ "1" "?x?\0" "axb\0"
+ "0" "?x?\0" "abx\0"
+ "0" "?x?\0" "xab\0"
+ /* Asterisk wild cards (backtracking) */
+ "0" "*??\0" "a\0"
+ "1" "*??\0" "ab\0"
+ "1" "*??\0" "abc\0"
+ "1" "*??\0" "abcd\0"
+ "0" "??*\0" "a\0"
+ "1" "??*\0" "ab\0"
+ "1" "??*\0" "abc\0"
+ "1" "??*\0" "abcd\0"
+ "0" "?*?\0" "a\0"
+ "1" "?*?\0" "ab\0"
+ "1" "?*?\0" "abc\0"
+ "1" "?*?\0" "abcd\0"
+ "1" "*b\0" "b\0"
+ "1" "*b\0" "ab\0"
+ "0" "*b\0" "ba\0"
+ "1" "*b\0" "bb\0"
+ "1" "*b\0" "abb\0"
+ "1" "*b\0" "bab\0"
+ "1" "*bc\0" "abbc\0"
+ "1" "*bc\0" "bc\0"
+ "1" "*bc\0" "bbc\0"
+ "1" "*bc\0" "bcbc\0"
+ /* Multiple asterisks (complex backtracking) */
+ "1" "*ac*\0" "abacadaeafag\0"
+ "1" "*ac*ae*ag*\0" "abacadaeafag\0"
+ "1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0"
+ "0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0"
+ "1" "*abcd*\0" "abcabcabcabcdefg\0"
+ "1" "*ab*cd*\0" "abcabcabcabcdefg\0"
+ "1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0"
+ "0" "*abcd*\0" "abcabcabcabcefg\0"
+ "0" "*ab*cd*\0" "abcabcabcabcefg\0";
+
+static int __init glob_init(void)
+{
+ unsigned successes = 0;
+ unsigned n = 0;
+ char const *p = glob_tests;
+ static char const message[] __initconst =
+ KERN_INFO "glob: %u self-tests passed, %u failed\n";
+
+ /*
+ * Tests are jammed together in a string. The first byte is '1'
+ * or '0' to indicate the expected outcome, or '\0' to indicate the
+ * end of the tests. Then come two null-terminated strings: the
+ * pattern and the string to match it against.
+ */
+ while (*p) {
+ bool expected = *p++ & 1;
+ char const *pat = p;
+
+ p += strlen(p) + 1;
+ successes += test(pat, p, expected);
+ p += strlen(p) + 1;
+ n++;
+ }
+
+ n -= successes;
+ printk(message, successes, n);
+
+ /* What's the errno for "kernel bug detected"? Guess... */
+ return n ? -ECANCELED : 0;
+}
+
+/* We need a dummy exit function to allow unload */
+static void __exit glob_fini(void) { }
+
+module_init(glob_init);
+module_exit(glob_fini);
+
+#endif /* CONFIG_GLOB_SELFTEST */
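To make the semantics documented above concrete, here is a minimal sketch (not part of the patch; the pattern strings and helper are hypothetical) of how a driver-side blacklist check might use the new interface:

#include <linux/kernel.h>
#include <linux/glob.h>

static const char * const model_blacklist[] = {
	"WDC*",			/* any model starting with "WDC" */
	"ST3[12]??????AS",	/* e.g. "ST31000333AS" or "ST32000542AS" */
	"*SSD*",
};

static bool model_is_blacklisted(const char *model)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(model_blacklist); i++)
		if (glob_match(model_blacklist[i], model))
			return true;
	return false;
}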
diff --git a/lib/idr.c b/lib/idr.c
index 2642fa8e424d..50be3fa9b657 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -18,12 +18,6 @@
* pointer or what ever, we treat it as a (void *). You can pass this
* id to a user for him to pass back at a later time. You then pass
* that id to this code and it returns your pointer.
-
- * You can release ids at any time. When all ids are released, most of
- * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
- * don't need to go to the memory "store" during an id allocate, just
- * so you don't need to be too concerned about locking and conflicts
- * with the slab allocator.
*/
#ifndef TEST // to test in user space...
@@ -151,7 +145,7 @@ static void idr_layer_rcu_free(struct rcu_head *head)
static inline void free_layer(struct idr *idr, struct idr_layer *p)
{
- if (idr->hint && idr->hint == p)
+ if (idr->hint == p)
RCU_INIT_POINTER(idr->hint, NULL);
call_rcu(&p->rcu_head, idr_layer_rcu_free);
}
@@ -249,7 +243,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
/* if already at the top layer, we need to grow */
- if (id >= 1 << (idp->layers * IDR_BITS)) {
+ if (id > idr_max(idp->layers)) {
*starting_id = id;
return -EAGAIN;
}
@@ -562,6 +556,11 @@ void idr_remove(struct idr *idp, int id)
if (id < 0)
return;
+ if (id > idr_max(idp->layers)) {
+ idr_remove_warning(id);
+ return;
+ }
+
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
idp->top->ary[0]) {
@@ -579,16 +578,6 @@ void idr_remove(struct idr *idp, int id)
bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
free_layer(idp, to_free);
}
- while (idp->id_free_cnt >= MAX_IDR_FREE) {
- p = get_from_free_list(idp);
- /*
- * Note: we don't call the rcu callback here, since the only
- * layers that fall into the freelist are those that have been
- * preallocated.
- */
- kmem_cache_free(idr_layer_cache, p);
- }
- return;
}
EXPORT_SYMBOL(idr_remove);
@@ -601,26 +590,27 @@ static void __idr_remove_all(struct idr *idp)
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
- p = idp->top;
+ *paa = idp->top;
RCU_INIT_POINTER(idp->top, NULL);
max = idr_max(idp->layers);
id = 0;
while (id >= 0 && id <= max) {
+ p = *paa;
while (n > IDR_BITS && p) {
n -= IDR_BITS;
- *paa++ = p;
p = p->ary[(id >> n) & IDR_MASK];
+ *++paa = p;
}
bt_mask = id;
id += 1 << n;
/* Get the highest bit that the above add changed from 0->1. */
while (n < fls(id ^ bt_mask)) {
- if (p)
- free_layer(idp, p);
+ if (*paa)
+ free_layer(idp, *paa);
n += IDR_BITS;
- p = *--paa;
+ --paa;
}
}
idp->layers = 0;
@@ -703,15 +693,16 @@ int idr_for_each(struct idr *idp,
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
- p = rcu_dereference_raw(idp->top);
+ *paa = rcu_dereference_raw(idp->top);
max = idr_max(idp->layers);
id = 0;
while (id >= 0 && id <= max) {
+ p = *paa;
while (n > 0 && p) {
n -= IDR_BITS;
- *paa++ = p;
p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
+ *++paa = p;
}
if (p) {
@@ -723,7 +714,7 @@ int idr_for_each(struct idr *idp,
id += 1 << n;
while (n < fls(id)) {
n += IDR_BITS;
- p = *--paa;
+ --paa;
}
}
@@ -751,17 +742,18 @@ void *idr_get_next(struct idr *idp, int *nextidp)
int n, max;
/* find first ent */
- p = rcu_dereference_raw(idp->top);
+ p = *paa = rcu_dereference_raw(idp->top);
if (!p)
return NULL;
n = (p->layer + 1) * IDR_BITS;
max = idr_max(p->layer + 1);
while (id >= 0 && id <= max) {
+ p = *paa;
while (n > 0 && p) {
n -= IDR_BITS;
- *paa++ = p;
p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
+ *++paa = p;
}
if (p) {
@@ -779,7 +771,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
id = round_up(id + 1, 1 << n);
while (n < fls(id)) {
n += IDR_BITS;
- p = *--paa;
+ --paa;
}
}
return NULL;
@@ -809,14 +801,12 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
p = idp->top;
if (!p)
- return ERR_PTR(-EINVAL);
-
- n = (p->layer+1) * IDR_BITS;
+ return ERR_PTR(-ENOENT);
- if (id >= (1 << n))
- return ERR_PTR(-EINVAL);
+ if (id > idr_max(p->layer + 1))
+ return ERR_PTR(-ENOENT);
- n -= IDR_BITS;
+ n = p->layer * IDR_BITS;
while ((n > 0) && p) {
p = p->ary[(id >> n) & IDR_MASK];
n -= IDR_BITS;
@@ -1027,6 +1017,9 @@ void ida_remove(struct ida *ida, int id)
int n;
struct ida_bitmap *bitmap;
+ if (idr_id > idr_max(ida->idr.layers))
+ goto err;
+
/* clear full bits while looking up the leaf idr_layer */
while ((shift > 0) && p) {
n = (idr_id >> shift) & IDR_MASK;
@@ -1042,7 +1035,7 @@ void ida_remove(struct ida *ida, int id)
__clear_bit(n, p->bitmap);
bitmap = (void *)p->ary[n];
- if (!test_bit(offset, bitmap->bitmap))
+ if (!bitmap || !test_bit(offset, bitmap->bitmap))
goto err;
/* update bitmap and remove it if empty */
diff --git a/lib/interval_tree.c b/lib/interval_tree.c
index e6eb406f2d65..f367f9ad544c 100644
--- a/lib/interval_tree.c
+++ b/lib/interval_tree.c
@@ -1,6 +1,7 @@
#include <linux/init.h>
#include <linux/interval_tree.h>
#include <linux/interval_tree_generic.h>
+#include <linux/module.h>
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
@@ -8,3 +9,8 @@
INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
unsigned long, __subtree_last,
START, LAST,, interval_tree)
+
+EXPORT_SYMBOL_GPL(interval_tree_insert);
+EXPORT_SYMBOL_GPL(interval_tree_remove);
+EXPORT_SYMBOL_GPL(interval_tree_iter_first);
+EXPORT_SYMBOL_GPL(interval_tree_iter_next);
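Since the exports above make the interval tree usable from modules, a minimal sketch of the typical query loop may help; it assumes the stock interval_tree_iter_first()/interval_tree_iter_next() signatures with inclusive [start, last] bounds:

#include <linux/kernel.h>
#include <linux/interval_tree.h>

static unsigned int count_overlaps(struct rb_root *root,
				   unsigned long start, unsigned long last)
{
	struct interval_tree_node *node;
	unsigned int n = 0;

	for (node = interval_tree_iter_first(root, start, last);
	     node;
	     node = interval_tree_iter_next(node, start, last))
		n++;	/* node->start/node->last overlap [start, last] */

	return n;
}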
diff --git a/lib/interval_tree_test_main.c b/lib/interval_tree_test.c
index 245900b98c8e..245900b98c8e 100644
--- a/lib/interval_tree_test_main.c
+++ b/lib/interval_tree_test.c
diff --git a/lib/iovec.c b/lib/iovec.c
index 454baa88bf27..df3abd1eaa4a 100644
--- a/lib/iovec.c
+++ b/lib/iovec.c
@@ -51,3 +51,62 @@ int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
return 0;
}
EXPORT_SYMBOL(memcpy_toiovec);
+
+/*
+ * Copy kernel to iovec. Returns -EFAULT on error.
+ */
+
+int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
+ int offset, int len)
+{
+ int copy;
+ for (; len > 0; ++iov) {
+ /* Skip over the finished iovecs */
+ if (unlikely(offset >= iov->iov_len)) {
+ offset -= iov->iov_len;
+ continue;
+ }
+ copy = min_t(unsigned int, iov->iov_len - offset, len);
+ if (copy_to_user(iov->iov_base + offset, kdata, copy))
+ return -EFAULT;
+ offset = 0;
+ kdata += copy;
+ len -= copy;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(memcpy_toiovecend);
+
+/*
+ * Copy iovec to kernel. Returns -EFAULT on error.
+ */
+
+int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
+ int offset, int len)
+{
+ /* No data? Done! */
+ if (len == 0)
+ return 0;
+
+ /* Skip over the finished iovecs */
+ while (offset >= iov->iov_len) {
+ offset -= iov->iov_len;
+ iov++;
+ }
+
+ while (len > 0) {
+ u8 __user *base = iov->iov_base + offset;
+ int copy = min_t(unsigned int, len, iov->iov_len - offset);
+
+ offset = 0;
+ if (copy_from_user(kdata, base, copy))
+ return -EFAULT;
+ len -= copy;
+ kdata += copy;
+ iov++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(memcpy_fromiovecend);
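As a hedged illustration of the two new helpers (not taken from any caller in the patch), a round trip through a kernel scratch buffer looks like this; both helpers return 0 on success or -EFAULT on a faulting user access:

#include <linux/uio.h>
#include <linux/socket.h>

static int echo_iovec(const struct iovec *iov, unsigned char *scratch,
		      int off, int len)
{
	int err;

	/* Gather 'len' bytes starting 'off' bytes into the iovec array. */
	err = memcpy_fromiovecend(scratch, iov, off, len);
	if (err)
		return err;

	/* Scatter them back to the same user locations. */
	return memcpy_toiovecend(iov, scratch, off, len);
}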
diff --git a/lib/kfifo.c b/lib/kfifo.c
index d79b9d222065..90ba1eb1df06 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -561,8 +561,7 @@ EXPORT_SYMBOL(__kfifo_to_user_r);
unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
{
- if (!nents)
- BUG();
+ BUG_ON(!nents);
len = __kfifo_max_r(len, recsize);
@@ -585,8 +584,7 @@ EXPORT_SYMBOL(__kfifo_dma_in_finish_r);
unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
{
- if (!nents)
- BUG();
+ BUG_ON(!nents);
len = __kfifo_max_r(len, recsize);
diff --git a/lib/klist.c b/lib/klist.c
index 358a368a2947..89b485a2a58d 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -140,11 +140,11 @@ void klist_add_tail(struct klist_node *n, struct klist *k)
EXPORT_SYMBOL_GPL(klist_add_tail);
/**
- * klist_add_after - Init a klist_node and add it after an existing node
+ * klist_add_behind - Init a klist_node and add it after an existing node
* @n: node we're adding.
* @pos: node to put @n after
*/
-void klist_add_after(struct klist_node *n, struct klist_node *pos)
+void klist_add_behind(struct klist_node *n, struct klist_node *pos)
{
struct klist *k = knode_klist(pos);
@@ -153,7 +153,7 @@ void klist_add_after(struct klist_node *n, struct klist_node *pos)
list_add(&n->n_node, &pos->n_node);
spin_unlock(&k->k_lock);
}
-EXPORT_SYMBOL_GPL(klist_add_after);
+EXPORT_SYMBOL_GPL(klist_add_behind);
/**
* klist_add_before - Init a klist_node and add it before an existing node
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 4e3bd71bd949..9ebf9e20de53 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -29,7 +29,9 @@
u64 uevent_seqnum;
+#ifdef CONFIG_UEVENT_HELPER
char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
+#endif
#ifdef CONFIG_NET
struct uevent_sock {
struct list_head list;
@@ -109,6 +111,7 @@ static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
}
#endif
+#ifdef CONFIG_UEVENT_HELPER
static int kobj_usermode_filter(struct kobject *kobj)
{
const struct kobj_ns_type_operations *ops;
@@ -147,6 +150,7 @@ static void cleanup_uevent_env(struct subprocess_info *info)
{
kfree(info->data);
}
+#endif
/**
* kobject_uevent_env - send an uevent with environmental data
@@ -323,6 +327,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
#endif
mutex_unlock(&uevent_sock_mutex);
+#ifdef CONFIG_UEVENT_HELPER
/* call uevent_helper, usually only enabled during early boot */
if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
struct subprocess_info *info;
@@ -347,6 +352,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
env = NULL; /* freed by cleanup_uevent_env */
}
}
+#endif
exit:
kfree(devpath);
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 244f5480c898..b3131f5cf8a2 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -62,10 +62,7 @@ EXPORT_SYMBOL(crc32c);
static int __init libcrc32c_mod_init(void)
{
tfm = crypto_alloc_shash("crc32c", 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- return 0;
+ return PTR_ERR_OR_ZERO(tfm);
}
static void __exit libcrc32c_mod_fini(void)
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 1183fa70a44d..12bcba1c8612 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -1,3 +1,6 @@
+
+#define pr_fmt(fmt) "list_sort_test: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
@@ -47,6 +50,7 @@ static void merge_and_restore_back_links(void *priv,
struct list_head *a, struct list_head *b)
{
struct list_head *tail = head;
+ u8 count = 0;
while (a && b) {
/* if equal, take 'a' -- important for sort stability */
@@ -70,7 +74,8 @@ static void merge_and_restore_back_links(void *priv,
* element comparison is needed, so the client's cmp()
* routine can invoke cond_resched() periodically.
*/
- (*cmp)(priv, tail->next, tail->next);
+ if (unlikely(!(++count)))
+ (*cmp)(priv, tail->next, tail->next);
tail->next->prev = tail;
tail = tail->next;
@@ -123,9 +128,7 @@ void list_sort(void *priv, struct list_head *head,
}
if (lev > max_lev) {
if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
- printk_once(KERN_DEBUG "list passed to"
- " list_sort() too long for"
- " efficiency\n");
+ printk_once(KERN_DEBUG "list too long for efficiency\n");
lev--;
}
max_lev = lev;
@@ -168,27 +171,25 @@ static struct debug_el **elts __initdata;
static int __init check(struct debug_el *ela, struct debug_el *elb)
{
if (ela->serial >= TEST_LIST_LEN) {
- printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
- ela->serial);
+ pr_err("error: incorrect serial %d\n", ela->serial);
return -EINVAL;
}
if (elb->serial >= TEST_LIST_LEN) {
- printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
- elb->serial);
+ pr_err("error: incorrect serial %d\n", elb->serial);
return -EINVAL;
}
if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
- printk(KERN_ERR "list_sort_test: error: phantom element\n");
+ pr_err("error: phantom element\n");
return -EINVAL;
}
if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
- printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
- ela->poison1, ela->poison2);
+ pr_err("error: bad poison: %#x/%#x\n",
+ ela->poison1, ela->poison2);
return -EINVAL;
}
if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
- printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
- elb->poison1, elb->poison2);
+ pr_err("error: bad poison: %#x/%#x\n",
+ elb->poison1, elb->poison2);
return -EINVAL;
}
return 0;
@@ -207,25 +208,23 @@ static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
static int __init list_sort_test(void)
{
- int i, count = 1, err = -EINVAL;
+ int i, count = 1, err = -ENOMEM;
struct debug_el *el;
- struct list_head *cur, *tmp;
+ struct list_head *cur;
LIST_HEAD(head);
- printk(KERN_DEBUG "list_sort_test: start testing list_sort()\n");
+ pr_debug("start testing list_sort()\n");
- elts = kmalloc(sizeof(void *) * TEST_LIST_LEN, GFP_KERNEL);
+ elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
if (!elts) {
- printk(KERN_ERR "list_sort_test: error: cannot allocate "
- "memory\n");
- goto exit;
+ pr_err("error: cannot allocate memory\n");
+ return err;
}
for (i = 0; i < TEST_LIST_LEN; i++) {
el = kmalloc(sizeof(*el), GFP_KERNEL);
if (!el) {
- printk(KERN_ERR "list_sort_test: error: cannot "
- "allocate memory\n");
+ pr_err("error: cannot allocate memory\n");
goto exit;
}
/* force some equivalencies */
@@ -239,52 +238,52 @@ static int __init list_sort_test(void)
list_sort(NULL, &head, cmp);
+ err = -EINVAL;
for (cur = head.next; cur->next != &head; cur = cur->next) {
struct debug_el *el1;
int cmp_result;
if (cur->next->prev != cur) {
- printk(KERN_ERR "list_sort_test: error: list is "
- "corrupted\n");
+ pr_err("error: list is corrupted\n");
goto exit;
}
cmp_result = cmp(NULL, cur, cur->next);
if (cmp_result > 0) {
- printk(KERN_ERR "list_sort_test: error: list is not "
- "sorted\n");
+ pr_err("error: list is not sorted\n");
goto exit;
}
el = container_of(cur, struct debug_el, list);
el1 = container_of(cur->next, struct debug_el, list);
if (cmp_result == 0 && el->serial >= el1->serial) {
- printk(KERN_ERR "list_sort_test: error: order of "
- "equivalent elements not preserved\n");
+ pr_err("error: order of equivalent elements not "
+ "preserved\n");
goto exit;
}
if (check(el, el1)) {
- printk(KERN_ERR "list_sort_test: error: element check "
- "failed\n");
+ pr_err("error: element check failed\n");
goto exit;
}
count++;
}
+ if (head.prev != cur) {
+ pr_err("error: list is corrupted\n");
+ goto exit;
+ }
+
if (count != TEST_LIST_LEN) {
- printk(KERN_ERR "list_sort_test: error: bad list length %d",
- count);
+ pr_err("error: bad list length %d", count);
goto exit;
}
err = 0;
exit:
+ for (i = 0; i < TEST_LIST_LEN; i++)
+ kfree(elts[i]);
kfree(elts);
- list_for_each_safe(cur, tmp, &head) {
- list_del(cur);
- kfree(container_of(cur, struct debug_el, list));
- }
return err;
}
module_init(list_sort_test);
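The dummy cmp() invocation mentioned in the hunk above now happens once per 256 merged elements; a client that wants to stay preemption friendly on huge lists can exploit it along these lines (sketch only, the struct and field names are made up):

#include <linux/kernel.h>
#include <linux/list_sort.h>
#include <linux/sched.h>

struct my_item {
	struct list_head list;
	int key;
};

static int my_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	/* list_sort() periodically calls cmp() with a == b purely so the
	 * client can reschedule; the return value is ignored then. */
	if (a == b) {
		cond_resched();
		return 0;
	}

	return container_of(a, struct my_item, list)->key -
	       container_of(b, struct my_item, list)->key;
}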
diff --git a/lib/lockref.c b/lib/lockref.c
index f07a40d33871..d2233de9a86e 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -1,6 +1,5 @@
#include <linux/export.h>
#include <linux/lockref.h>
-#include <linux/mutex.h>
#if USE_CMPXCHG_LOCKREF
@@ -29,7 +28,7 @@
if (likely(old.lock_count == prev.lock_count)) { \
SUCCESS; \
} \
- arch_mutex_cpu_relax(); \
+ cpu_relax_lowlatency(); \
} \
} while (0)
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index df6839e3ce08..7a85967060a5 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -72,6 +72,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
len = *ip++;
for (; len == 255; length += 255)
len = *ip++;
+ if (unlikely(length > (size_t)(length + len)))
+ goto _output_error;
length += len;
}
@@ -106,6 +108,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
if (length == ML_MASK) {
for (; *ip == 255; length += 255)
ip++;
+ if (unlikely(length > (size_t)(length + *ip)))
+ goto _output_error;
length += *ip++;
}
@@ -155,7 +159,7 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
/* write overflow error detected */
_output_error:
- return (int) (-(((char *)ip) - source));
+ return -1;
}
static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
@@ -188,6 +192,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
int s = 255;
while ((ip < iend) && (s == 255)) {
s = *ip++;
+ if (unlikely(length > (size_t)(length + s)))
+ goto _output_error;
length += s;
}
}
@@ -228,6 +234,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
if (length == ML_MASK) {
while (ip < iend) {
int s = *ip++;
+ if (unlikely(length > (size_t)(length + s)))
+ goto _output_error;
length += s;
if (s == 255)
continue;
@@ -280,7 +288,7 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
/* write overflow error detected */
_output_error:
- return (int) (-(((char *) ip) - source));
+ return -1;
}
int lz4_decompress(const unsigned char *src, size_t *src_len,
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 569985d522d5..8563081e8da3 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -19,11 +19,31 @@
#include <linux/lzo.h>
#include "lzodefs.h"
-#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
-#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
-#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
-#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
-#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
+#define HAVE_IP(t, x) \
+ (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
+ (((t + x) >= t) && ((t + x) >= x)))
+
+#define HAVE_OP(t, x) \
+ (((size_t)(op_end - op) >= (size_t)(t + x)) && \
+ (((t + x) >= t) && ((t + x) >= x)))
+
+#define NEED_IP(t, x) \
+ do { \
+ if (!HAVE_IP(t, x)) \
+ goto input_overrun; \
+ } while (0)
+
+#define NEED_OP(t, x) \
+ do { \
+ if (!HAVE_OP(t, x)) \
+ goto output_overrun; \
+ } while (0)
+
+#define TEST_LB(m_pos) \
+ do { \
+ if ((m_pos) < out) \
+ goto lookbehind_overrun; \
+ } while (0)
int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len)
@@ -58,14 +78,14 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
while (unlikely(*ip == 0)) {
t += 255;
ip++;
- NEED_IP(1);
+ NEED_IP(1, 0);
}
t += 15 + *ip++;
}
t += 3;
copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+ if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
const unsigned char *ie = ip + t;
unsigned char *oe = op + t;
do {
@@ -81,8 +101,8 @@ copy_literal_run:
} else
#endif
{
- NEED_OP(t);
- NEED_IP(t + 3);
+ NEED_OP(t, 0);
+ NEED_IP(t, 3);
do {
*op++ = *ip++;
} while (--t > 0);
@@ -95,7 +115,7 @@ copy_literal_run:
m_pos -= t >> 2;
m_pos -= *ip++ << 2;
TEST_LB(m_pos);
- NEED_OP(2);
+ NEED_OP(2, 0);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
@@ -119,10 +139,10 @@ copy_literal_run:
while (unlikely(*ip == 0)) {
t += 255;
ip++;
- NEED_IP(1);
+ NEED_IP(1, 0);
}
t += 31 + *ip++;
- NEED_IP(2);
+ NEED_IP(2, 0);
}
m_pos = op - 1;
next = get_unaligned_le16(ip);
@@ -137,10 +157,10 @@ copy_literal_run:
while (unlikely(*ip == 0)) {
t += 255;
ip++;
- NEED_IP(1);
+ NEED_IP(1, 0);
}
t += 7 + *ip++;
- NEED_IP(2);
+ NEED_IP(2, 0);
}
next = get_unaligned_le16(ip);
ip += 2;
@@ -154,7 +174,7 @@ copy_literal_run:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
if (op - m_pos >= 8) {
unsigned char *oe = op + t;
- if (likely(HAVE_OP(t + 15))) {
+ if (likely(HAVE_OP(t, 15))) {
do {
COPY8(op, m_pos);
op += 8;
@@ -164,7 +184,7 @@ copy_literal_run:
m_pos += 8;
} while (op < oe);
op = oe;
- if (HAVE_IP(6)) {
+ if (HAVE_IP(6, 0)) {
state = next;
COPY4(op, ip);
op += next;
@@ -172,7 +192,7 @@ copy_literal_run:
continue;
}
} else {
- NEED_OP(t);
+ NEED_OP(t, 0);
do {
*op++ = *m_pos++;
} while (op < oe);
@@ -181,7 +201,7 @@ copy_literal_run:
#endif
{
unsigned char *oe = op + t;
- NEED_OP(t);
+ NEED_OP(t, 0);
op[0] = m_pos[0];
op[1] = m_pos[1];
op += 2;
@@ -194,15 +214,15 @@ match_next:
state = next;
t = next;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
- if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+ if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
COPY4(op, ip);
op += t;
ip += t;
} else
#endif
{
- NEED_IP(t + 3);
- NEED_OP(t);
+ NEED_IP(t, 3);
+ NEED_OP(t, 0);
while (t > 0) {
*op++ = *ip++;
t--;
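The rewritten HAVE_IP()/HAVE_OP() macros fold an integer-overflow check into the bounds test; stripped of the decompressor context, the pattern is simply (sketch, not part of the patch):

#include <linux/types.h>

static bool have_room(size_t remaining, size_t t, size_t x)
{
	size_t need = t + x;

	/* If the sum wrapped around, it cannot possibly fit. */
	if (need < t || need < x)
		return false;

	return remaining >= need;
}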
diff --git a/lib/net_utils.c b/lib/net_utils.c
index 2e3c52c8d050..148fc6e99ef6 100644
--- a/lib/net_utils.c
+++ b/lib/net_utils.c
@@ -3,24 +3,24 @@
#include <linux/ctype.h>
#include <linux/kernel.h>
-int mac_pton(const char *s, u8 *mac)
+bool mac_pton(const char *s, u8 *mac)
{
int i;
/* XX:XX:XX:XX:XX:XX */
if (strlen(s) < 3 * ETH_ALEN - 1)
- return 0;
+ return false;
/* Don't dirty result unless string is valid MAC. */
for (i = 0; i < ETH_ALEN; i++) {
if (!isxdigit(s[i * 3]) || !isxdigit(s[i * 3 + 1]))
- return 0;
+ return false;
if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':')
- return 0;
+ return false;
}
for (i = 0; i < ETH_ALEN; i++) {
mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]);
}
- return 1;
+ return true;
}
EXPORT_SYMBOL(mac_pton);
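With the return type now bool, callers read naturally as a predicate; a hedged example of parsing a user-supplied address string (the helper name is invented for illustration):

#include <linux/kernel.h>
#include <linux/if_ether.h>

static int set_station_addr(const char *val, u8 addr[ETH_ALEN])
{
	/* Expects the "XX:XX:XX:XX:XX:XX" form checked by mac_pton(). */
	if (!mac_pton(val, addr))
		return -EINVAL;

	return 0;
}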
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 10ad042d01be..9c3e85ff0a6c 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -136,6 +136,7 @@ int nla_validate(const struct nlattr *head, int len, int maxtype,
errout:
return err;
}
+EXPORT_SYMBOL(nla_validate);
/**
 * nla_policy_len - Determine the max. length of a policy
@@ -162,6 +163,7 @@ nla_policy_len(const struct nla_policy *p, int n)
return len;
}
+EXPORT_SYMBOL(nla_policy_len);
/**
* nla_parse - Parse a stream of attributes into a tb buffer
@@ -208,6 +210,7 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
errout:
return err;
}
+EXPORT_SYMBOL(nla_parse);
/**
* nla_find - Find a specific attribute in a stream of attributes
@@ -228,6 +231,7 @@ struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
return NULL;
}
+EXPORT_SYMBOL(nla_find);
/**
* nla_strlcpy - Copy string attribute payload into a sized buffer
@@ -258,6 +262,7 @@ size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
return srclen;
}
+EXPORT_SYMBOL(nla_strlcpy);
/**
* nla_memcpy - Copy a netlink attribute into another memory area
@@ -278,6 +283,7 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
return minlen;
}
+EXPORT_SYMBOL(nla_memcpy);
/**
* nla_memcmp - Compare an attribute with sized memory area
@@ -295,6 +301,7 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
return d;
}
+EXPORT_SYMBOL(nla_memcmp);
/**
* nla_strcmp - Compare a string attribute against a string
@@ -317,6 +324,7 @@ int nla_strcmp(const struct nlattr *nla, const char *str)
return d;
}
+EXPORT_SYMBOL(nla_strcmp);
#ifdef CONFIG_NET
/**
@@ -502,12 +510,3 @@ int nla_append(struct sk_buff *skb, int attrlen, const void *data)
}
EXPORT_SYMBOL(nla_append);
#endif
-
-EXPORT_SYMBOL(nla_validate);
-EXPORT_SYMBOL(nla_policy_len);
-EXPORT_SYMBOL(nla_parse);
-EXPORT_SYMBOL(nla_find);
-EXPORT_SYMBOL(nla_strlcpy);
-EXPORT_SYMBOL(nla_memcpy);
-EXPORT_SYMBOL(nla_memcmp);
-EXPORT_SYMBOL(nla_strcmp);
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 963b7034a51b..fe5a3342e960 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -31,6 +31,11 @@
#define PCPU_COUNT_BIAS (1U << 31)
+static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+{
+ return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+}
+
/**
* percpu_ref_init - initialize a percpu refcount
* @ref: percpu_ref to initialize
@@ -46,8 +51,8 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
{
atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
- ref->pcpu_count = alloc_percpu(unsigned);
- if (!ref->pcpu_count)
+ ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
+ if (!ref->pcpu_count_ptr)
return -ENOMEM;
ref->release = release;
@@ -56,53 +61,71 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
EXPORT_SYMBOL_GPL(percpu_ref_init);
/**
- * percpu_ref_cancel_init - cancel percpu_ref_init()
- * @ref: percpu_ref to cancel init for
+ * percpu_ref_reinit - re-initialize a percpu refcount
+ * @ref: percpu_ref to re-initialize
*
- * Once a percpu_ref is initialized, its destruction is initiated by
- * percpu_ref_kill() and completes asynchronously, which can be painful to
- * do when destroying a half-constructed object in init failure path.
+ * Re-initialize @ref so that it's in the same state as when it finished
+ * percpu_ref_init(). @ref must have been initialized successfully, killed
+ * and reached 0 but not exited.
*
- * This function destroys @ref without invoking @ref->release and the
- * memory area containing it can be freed immediately on return. To
- * prevent accidental misuse, it's required that @ref has finished
- * percpu_ref_init(), whether successful or not, but never used.
- *
- * The weird name and usage restriction are to prevent people from using
- * this function by mistake for normal shutdown instead of
- * percpu_ref_kill().
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
*/
-void percpu_ref_cancel_init(struct percpu_ref *ref)
+void percpu_ref_reinit(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count = ref->pcpu_count;
+ unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
int cpu;
- WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);
+ BUG_ON(!pcpu_count);
+ WARN_ON(!percpu_ref_is_zero(ref));
+
+ atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+
+ /*
+ * Restore per-cpu operation. smp_store_release() is paired with
+ * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
+ * that the zeroing is visible to all percpu accesses which can see
+ * the following PCPU_REF_DEAD clearing.
+ */
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(pcpu_count, cpu) = 0;
+
+ smp_store_release(&ref->pcpu_count_ptr,
+ ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+
+/**
+ * percpu_ref_exit - undo percpu_ref_init()
+ * @ref: percpu_ref to exit
+ *
+ * This function exits @ref. The caller is responsible for ensuring that
+ * @ref is no longer in active use. The usual places to invoke this
+ * function from are the @ref->release() callback or in init failure path
+ * where percpu_ref_init() succeeded but other parts of the initialization
+ * of the embedding object failed.
+ */
+void percpu_ref_exit(struct percpu_ref *ref)
+{
+ unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
if (pcpu_count) {
- for_each_possible_cpu(cpu)
- WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
- free_percpu(ref->pcpu_count);
+ free_percpu(pcpu_count);
+ ref->pcpu_count_ptr = PCPU_REF_DEAD;
}
}
-EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
+EXPORT_SYMBOL_GPL(percpu_ref_exit);
static void percpu_ref_kill_rcu(struct rcu_head *rcu)
{
struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
- unsigned __percpu *pcpu_count = ref->pcpu_count;
+ unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
unsigned count = 0;
int cpu;
- /* Mask out PCPU_REF_DEAD */
- pcpu_count = (unsigned __percpu *)
- (((unsigned long) pcpu_count) & ~PCPU_STATUS_MASK);
-
for_each_possible_cpu(cpu)
count += *per_cpu_ptr(pcpu_count, cpu);
- free_percpu(pcpu_count);
-
pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
/*
@@ -152,11 +175,10 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill)
{
- WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
+ WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
"percpu_ref_kill() called more than once!\n");
- ref->pcpu_count = (unsigned __percpu *)
- (((unsigned long) ref->pcpu_count)|PCPU_REF_DEAD);
+ ref->pcpu_count_ptr |= PCPU_REF_DEAD;
ref->confirm_kill = confirm_kill;
call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
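pcpu_count_ptr() above relies on the percpu allocation being aligned, so the low bit of the stored address is free to carry PCPU_REF_DEAD; in isolation the trick looks like this (illustrative sketch, flag assumed to live in bit 0):

#include <linux/types.h>
#include <linux/percpu.h>

#define MY_REF_DEAD	1UL	/* stand-in for PCPU_REF_DEAD */

static unsigned __percpu *my_count_ptr(unsigned long ptr_and_flag)
{
	/* Mask the flag bit off to recover the real percpu pointer. */
	return (unsigned __percpu *)(ptr_and_flag & ~MY_REF_DEAD);
}

static bool my_ref_is_dead(unsigned long ptr_and_flag)
{
	return ptr_and_flag & MY_REF_DEAD;
}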
diff --git a/lib/plist.c b/lib/plist.c
index 1ebc95f7a46f..d408e774b746 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -134,6 +134,46 @@ void plist_del(struct plist_node *node, struct plist_head *head)
plist_check_head(head);
}
+/**
+ * plist_requeue - Requeue @node at end of same-prio entries.
+ *
+ * This is essentially an optimized plist_del() followed by
+ * plist_add(). It moves an entry already in the plist to
+ * after any other same-priority entries.
+ *
+ * @node: &struct plist_node pointer - entry to be moved
+ * @head: &struct plist_head pointer - list head
+ */
+void plist_requeue(struct plist_node *node, struct plist_head *head)
+{
+ struct plist_node *iter;
+ struct list_head *node_next = &head->node_list;
+
+ plist_check_head(head);
+ BUG_ON(plist_head_empty(head));
+ BUG_ON(plist_node_empty(node));
+
+ if (node == plist_last(head))
+ return;
+
+ iter = plist_next(node);
+
+ if (node->prio != iter->prio)
+ return;
+
+ plist_del(node, head);
+
+ plist_for_each_continue(iter, head) {
+ if (node->prio != iter->prio) {
+ node_next = &iter->node_list;
+ break;
+ }
+ }
+ list_add_tail(&node->node_list, node_next);
+
+ plist_check_head(head);
+}
+
#ifdef CONFIG_DEBUG_PI_LIST
#include <linux/sched.h>
#include <linux/module.h>
@@ -170,12 +210,20 @@ static void __init plist_test_check(int nr_expect)
BUG_ON(prio_pos->prio_list.next != &first->prio_list);
}
+static void __init plist_test_requeue(struct plist_node *node)
+{
+ plist_requeue(node, &test_head);
+
+ if (node != plist_last(&test_head))
+ BUG_ON(node->prio == plist_next(node)->prio);
+}
+
static int __init plist_test(void)
{
int nr_expect = 0, i, loop;
unsigned int r = local_clock();
- pr_debug("start plist test\n");
+ printk(KERN_DEBUG "start plist test\n");
plist_head_init(&test_head);
for (i = 0; i < ARRAY_SIZE(test_node); i++)
plist_node_init(test_node + i, 0);
@@ -193,6 +241,10 @@ static int __init plist_test(void)
nr_expect--;
}
plist_test_check(nr_expect);
+ if (!plist_node_empty(test_node + i)) {
+ plist_test_requeue(test_node + i);
+ plist_test_check(nr_expect);
+ }
}
for (i = 0; i < ARRAY_SIZE(test_node); i++) {
@@ -203,7 +255,7 @@ static int __init plist_test(void)
plist_test_check(nr_expect);
}
- pr_debug("end plist test\n");
+ printk(KERN_DEBUG "end plist test\n");
return 0;
}
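plist_requeue() above enables simple round-robin rotation among entries that share a priority; a hypothetical picker built on it (names invented) would be:

#include <linux/plist.h>

static struct plist_node *pick_and_rotate(struct plist_head *head)
{
	struct plist_node *node;

	if (plist_head_empty(head))
		return NULL;

	/* Take the current front runner of the highest priority... */
	node = plist_first(head);

	/* ...and rotate it behind its same-priority peers, so the next
	 * call returns a different entry of that priority (no-op when
	 * the node has no same-priority successor). */
	plist_requeue(node, head);

	return node;
}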
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 9599aa72d7a0..3291a8e37490 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -27,6 +27,7 @@
#include <linux/radix-tree.h>
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/kmemleak.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/string.h>
@@ -194,12 +195,17 @@ radix_tree_node_alloc(struct radix_tree_root *root)
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
- rtp = &__get_cpu_var(radix_tree_preloads);
+ rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr) {
ret = rtp->nodes[rtp->nr - 1];
rtp->nodes[rtp->nr - 1] = NULL;
rtp->nr--;
}
+ /*
+ * Update the allocation stack trace as this is more useful
+ * for debugging.
+ */
+ kmemleak_update_trace(ret);
}
if (ret == NULL)
ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
@@ -250,14 +256,14 @@ static int __radix_tree_preload(gfp_t gfp_mask)
int ret = -ENOMEM;
preempt_disable();
- rtp = &__get_cpu_var(radix_tree_preloads);
+ rtp = this_cpu_ptr(&radix_tree_preloads);
while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
preempt_enable();
node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
if (node == NULL)
goto out;
preempt_disable();
- rtp = &__get_cpu_var(radix_tree_preloads);
+ rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr < ARRAY_SIZE(rtp->nodes))
rtp->nodes[rtp->nr++] = node;
else
@@ -1296,7 +1302,6 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
/**
* __radix_tree_delete_node - try to free node after clearing a slot
* @root: radix tree root
- * @index: index key
* @node: node containing @index
*
* After clearing the slot at @index in @node from radix tree
diff --git a/lib/random32.c b/lib/random32.c
index fa5da61ce7ad..c9b6bf3afe0c 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -40,6 +40,10 @@
#ifdef CONFIG_RANDOM32_SELFTEST
static void __init prandom_state_selftest(void);
+#else
+static inline void prandom_state_selftest(void)
+{
+}
#endif
static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
@@ -53,8 +57,7 @@ static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
*/
u32 prandom_u32_state(struct rnd_state *state)
{
-#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
-
+#define TAUSWORTHE(s, a, b, c, d) ((s & c) << d) ^ (((s << a) ^ s) >> b)
state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U);
state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U);
state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U);
@@ -147,21 +150,25 @@ static void prandom_warmup(struct rnd_state *state)
prandom_u32_state(state);
}
-static void prandom_seed_very_weak(struct rnd_state *state, u32 seed)
+static u32 __extract_hwseed(void)
{
- /* Note: This sort of seeding is ONLY used in test cases and
- * during boot at the time from core_initcall until late_initcall
- * as we don't have a stronger entropy source available yet.
- * After late_initcall, we reseed entire state, we have to (!),
- * otherwise an attacker just needs to search 32 bit space to
- * probe for our internal 128 bit state if he knows a couple
- * of prandom32 outputs!
- */
-#define LCG(x) ((x) * 69069U) /* super-duper LCG */
- state->s1 = __seed(LCG(seed), 2U);
- state->s2 = __seed(LCG(state->s1), 8U);
- state->s3 = __seed(LCG(state->s2), 16U);
- state->s4 = __seed(LCG(state->s3), 128U);
+ u32 val = 0;
+
+ (void)(arch_get_random_seed_int(&val) ||
+ arch_get_random_int(&val));
+
+ return val;
+}
+
+static void prandom_seed_early(struct rnd_state *state, u32 seed,
+ bool mix_with_hwseed)
+{
+#define LCG(x) ((x) * 69069U) /* super-duper LCG */
+#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
+ state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
+ state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
+ state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
+ state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
}
/**
@@ -194,14 +201,13 @@ static int __init prandom_init(void)
{
int i;
-#ifdef CONFIG_RANDOM32_SELFTEST
prandom_state_selftest();
-#endif
for_each_possible_cpu(i) {
struct rnd_state *state = &per_cpu(net_rand_state,i);
+ u32 weak_seed = (i + jiffies) ^ random_get_entropy();
- prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy());
+ prandom_seed_early(state, weak_seed, true);
prandom_warmup(state);
}
@@ -210,6 +216,7 @@ static int __init prandom_init(void)
core_initcall(prandom_init);
static void __prandom_timer(unsigned long dontcare);
+
static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0);
static void __prandom_timer(unsigned long dontcare)
@@ -419,7 +426,7 @@ static void __init prandom_state_selftest(void)
for (i = 0; i < ARRAY_SIZE(test1); i++) {
struct rnd_state state;
- prandom_seed_very_weak(&state, test1[i].seed);
+ prandom_seed_early(&state, test1[i].seed, false);
prandom_warmup(&state);
if (test1[i].result != prandom_u32_state(&state))
@@ -434,7 +441,7 @@ static void __init prandom_state_selftest(void)
for (i = 0; i < ARRAY_SIZE(test2); i++) {
struct rnd_state state;
- prandom_seed_very_weak(&state, test2[i].seed);
+ prandom_seed_early(&state, test2[i].seed, false);
prandom_warmup(&state);
for (j = 0; j < test2[i].iteration - 1; j++)
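A simplified model of the prandom_seed_early() chain above (illustration only; get_hw_seed() is a made-up stand-in for the arch_get_random_* calls):

#include <linux/types.h>

static u32 get_hw_seed(void);	/* hypothetical, see __extract_hwseed() */

static void seed_chain(u32 s[4], u32 seed, bool mix_hw)
{
	static const u32 floor[4] = { 2, 8, 16, 128 };
	u32 prev = seed;
	int i;

	for (i = 0; i < 4; i++) {
		/* Each word: the previous value run through the 69069
		 * LCG, optionally XORed with a hardware seed word. */
		prev = (mix_hw ? get_hw_seed() : 0) ^ (prev * 69069U);

		/* Keep the word above the generator's minimum, as
		 * __seed() does. */
		if (prev < floor[i])
			prev += floor[i];

		s[i] = prev;
	}
}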
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 65f4effd117f..c16c81a3d430 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -101,7 +101,7 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
* / \ / \
* p u --> P U
* / /
- * n N
+ * n n
*
* However, since g's parent might be red, and
* 4) does not allow this, we need to recurse
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
new file mode 100644
index 000000000000..e6940cf16628
--- /dev/null
+++ b/lib/rhashtable.c
@@ -0,0 +1,797 @@
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
+ *
+ * Based on the following paper:
+ * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
+ *
+ * Code partially derived from nft_hash
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/hash.h>
+#include <linux/random.h>
+#include <linux/rhashtable.h>
+#include <linux/log2.h>
+
+#define HASH_DEFAULT_SIZE 64UL
+#define HASH_MIN_SIZE 4UL
+
+#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
+
+#ifdef CONFIG_PROVE_LOCKING
+int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
+{
+ return ht->p.mutex_is_held();
+}
+EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
+#endif
+
+/**
+ * rht_obj - cast hash head to outer object
+ * @ht: hash table
+ * @he: hashed node
+ */
+void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
+{
+ return (void *) he - ht->p.head_offset;
+}
+EXPORT_SYMBOL_GPL(rht_obj);
+
+static u32 __hashfn(const struct rhashtable *ht, const void *key,
+ u32 len, u32 hsize)
+{
+ u32 h;
+
+ h = ht->p.hashfn(key, len, ht->p.hash_rnd);
+
+ return h & (hsize - 1);
+}
+
+/**
+ * rhashtable_hashfn - compute hash for key of given length
+ * @ht: hash table to compute the hash for
+ * @key: pointer to key
+ * @len: length of key
+ *
+ * Computes the hash value using the hash function provided in the 'hashfn'
+ * of struct rhashtable_params. The returned value is guaranteed to be
+ * smaller than the number of buckets in the hash table.
+ */
+u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
+{
+ struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ return __hashfn(ht, key, len, tbl->size);
+}
+EXPORT_SYMBOL_GPL(rhashtable_hashfn);
+
+static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
+{
+ if (unlikely(!ht->p.key_len)) {
+ u32 h;
+
+ h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
+
+ return h & (hsize - 1);
+ }
+
+ return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
+}
+
+/**
+ * rhashtable_obj_hashfn - compute hash for hashed object
+ * @ht: hash table to compute the hash for
+ * @ptr: pointer to hashed object
+ *
+ * Computes the hash value using either 'hashfn' or 'obj_hashfn', depending
+ * on whether the hash table is set up to work with a fixed length key.
+ * The returned value is guaranteed to be smaller than the number of
+ * buckets in the hash table.
+ */
+u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
+{
+ struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ return obj_hashfn(ht, ptr, tbl->size);
+}
+EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);
+
+static u32 head_hashfn(const struct rhashtable *ht,
+ const struct rhash_head *he, u32 hsize)
+{
+ return obj_hashfn(ht, rht_obj(ht, he), hsize);
+}
+
+static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
+{
+ struct bucket_table *tbl;
+ size_t size;
+
+ size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
+ tbl = kzalloc(size, flags);
+ if (tbl == NULL)
+ tbl = vzalloc(size);
+
+ if (tbl == NULL)
+ return NULL;
+
+ tbl->size = nbuckets;
+
+ return tbl;
+}
+
+static void bucket_table_free(const struct bucket_table *tbl)
+{
+ kvfree(tbl);
+}
+
+/**
+ * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
+ * @ht: hash table
+ * @new_size: new table size
+ */
+bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+{
+ /* Expand table when exceeding 75% load */
+ return ht->nelems > (new_size / 4 * 3);
+}
+EXPORT_SYMBOL_GPL(rht_grow_above_75);
+
+/**
+ * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
+ * @ht: hash table
+ * @new_size: new table size
+ */
+bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
+{
+ /* Shrink table beneath 30% load */
+ return ht->nelems < (new_size * 3 / 10);
+}
+EXPORT_SYMBOL_GPL(rht_shrink_below_30);
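/*
 * A quick worked example of the two thresholds above (illustration only,
 * not part of the patch): with new_size = 64, rht_grow_above_75() fires
 * once nelems > 48 (64 / 4 * 3), while rht_shrink_below_30() fires only
 * when nelems < 19 (64 * 3 / 10, rounded down), so growth and shrinking
 * cannot oscillate around a single load level.
 */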
+
+static void hashtable_chain_unzip(const struct rhashtable *ht,
+ const struct bucket_table *new_tbl,
+ struct bucket_table *old_tbl, size_t n)
+{
+ struct rhash_head *he, *p, *next;
+ unsigned int h;
+
+ /* Old bucket empty, no work needed. */
+ p = rht_dereference(old_tbl->buckets[n], ht);
+ if (!p)
+ return;
+
+ /* Advance the old bucket pointer one or more times until it
+ * reaches a node that doesn't hash to the same new bucket as
+ * the node before it; call that preceding node p.
+ */
+ h = head_hashfn(ht, p, new_tbl->size);
+ rht_for_each(he, p->next, ht) {
+ if (head_hashfn(ht, he, new_tbl->size) != h)
+ break;
+ p = he;
+ }
+ RCU_INIT_POINTER(old_tbl->buckets[n], p->next);
+
+ /* Find the subsequent node which does hash to the same
+ * bucket as node P, or NULL if no such node exists.
+ */
+ next = NULL;
+ if (he) {
+ rht_for_each(he, he->next, ht) {
+ if (head_hashfn(ht, he, new_tbl->size) == h) {
+ next = he;
+ break;
+ }
+ }
+ }
+
+ /* Set p's next pointer to that subsequent node pointer,
+ * bypassing the nodes which do not hash to p's bucket
+ */
+ RCU_INIT_POINTER(p->next, next);
+}
+
+/**
+ * rhashtable_expand - Expand hash table while allowing concurrent lookups
+ * @ht: the hash table to expand
+ * @flags: allocation flags
+ *
+ * A secondary bucket array is allocated and the hash entries are migrated
+ * while keeping them on both lists until the end of the RCU grace period.
+ *
+ * This function may only be called in a context where it is safe to call
+ * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
+ *
+ * The caller must ensure that no concurrent table mutations take place.
+ * It is however valid to have concurrent lookups if they are RCU protected.
+ */
+int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
+{
+ struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
+ struct rhash_head *he;
+ unsigned int i, h;
+ bool complete;
+
+ ASSERT_RHT_MUTEX(ht);
+
+ if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
+ return 0;
+
+ new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
+ if (new_tbl == NULL)
+ return -ENOMEM;
+
+ ht->shift++;
+
+ /* For each new bucket, search the corresponding old bucket
+ * for the first entry that hashes to the new bucket, and
+ * link the new bucket to that entry. Since all the entries
+ * which will end up in the new bucket appear in the same
+ * old bucket, this constructs an entirely valid new hash
+ * table, but with multiple buckets "zipped" together into a
+ * single imprecise chain.
+ */
+ for (i = 0; i < new_tbl->size; i++) {
+ h = i & (old_tbl->size - 1);
+ rht_for_each(he, old_tbl->buckets[h], ht) {
+ if (head_hashfn(ht, he, new_tbl->size) == i) {
+ RCU_INIT_POINTER(new_tbl->buckets[i], he);
+ break;
+ }
+ }
+ }
+
+ /* Publish the new table pointer. Lookups may now traverse
+ * the new table, but they will not benefit from any
+ * additional efficiency until later steps unzip the buckets.
+ */
+ rcu_assign_pointer(ht->tbl, new_tbl);
+
+ /* Unzip interleaved hash chains */
+ do {
+ /* Wait for readers. All new readers will see the new
+ * table, and thus no references to the old table will
+ * remain.
+ */
+ synchronize_rcu();
+
+ /* For each bucket in the old table (each of which
+ * contains items from multiple buckets of the new
+ * table): ...
+ */
+ complete = true;
+ for (i = 0; i < old_tbl->size; i++) {
+ hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
+ if (old_tbl->buckets[i] != NULL)
+ complete = false;
+ }
+ } while (!complete);
+
+ bucket_table_free(old_tbl);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_expand);
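/*
 * Illustration of the zip/unzip scheme above (not part of the patch).
 * Doubling a 2-bucket table to 4, suppose old bucket 0 holds A -> B -> C,
 * where A and C rehash to new bucket 0 and B rehashes to new bucket 2:
 *
 *   after linking:          new[0] -> A -> B -> C    new[2] -> B -> C
 *   after first unzip pass: new[0] -> A -> C         new[2] -> B -> C
 *
 * Lookups remain correct the whole time because each new bucket's chain
 * always contains all of its own entries, merely interleaved with foreign
 * ones until the unzip passes (each separated by synchronize_rcu()) have
 * fully disentangled the chains.
 */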
+
+/**
+ * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
+ * @ht: the hash table to shrink
+ * @flags: allocation flags
+ *
+ * This function may only be called in a context where it is safe to call
+ * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
+ *
+ * The caller must ensure that no concurrent table mutations take place.
+ * It is however valid to have concurrent lookups if they are RCU protected.
+ */
+int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
+{
+ struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
+ struct rhash_head __rcu **pprev;
+ unsigned int i;
+
+ ASSERT_RHT_MUTEX(ht);
+
+ if (tbl->size <= HASH_MIN_SIZE)
+ return 0;
+
+ ntbl = bucket_table_alloc(tbl->size / 2, flags);
+ if (ntbl == NULL)
+ return -ENOMEM;
+
+ ht->shift--;
+
+ /* Link each bucket in the new table to the first bucket
+ * in the old table that contains entries which will hash
+ * to the new bucket.
+ */
+ for (i = 0; i < ntbl->size; i++) {
+ ntbl->buckets[i] = tbl->buckets[i];
+
+ /* Walk to the end of that chain and append the chain from the
+ * corresponding bucket in the upper half of the old table; its
+ * entries hash to this same new bucket.
+ */
+ for (pprev = &ntbl->buckets[i]; *pprev != NULL;
+ pprev = &rht_dereference(*pprev, ht)->next)
+ ;
+ RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
+ }
+
+ /* Publish the new, valid hash table */
+ rcu_assign_pointer(ht->tbl, ntbl);
+
+ /* Wait for readers. No new readers will have references to the
+ * old hash table.
+ */
+ synchronize_rcu();
+
+ bucket_table_free(tbl);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_shrink);
+
+/**
+ * rhashtable_insert - insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @flags: allocation flags (table expansion)
+ *
+ * Will automatically grow the table via rhashtable_expand() if the
+ * grow_decision function specified at rhashtable_init() returns true.
+ *
+ * The caller must ensure that no concurrent table mutations occur. It is
+ * however valid to have concurrent lookups if they are RCU protected.
+ */
+void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
+ gfp_t flags)
+{
+ struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+ u32 hash;
+
+ ASSERT_RHT_MUTEX(ht);
+
+ hash = head_hashfn(ht, obj, tbl->size);
+ RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
+ rcu_assign_pointer(tbl->buckets[hash], obj);
+ ht->nelems++;
+
+ if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
+ rhashtable_expand(ht, flags);
+}
+EXPORT_SYMBOL_GPL(rhashtable_insert);
+
+/**
+ * rhashtable_remove_pprev - remove object from hash table given previous element
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @pprev: pointer to previous element
+ * @flags: allocation flags (table expansion)
+ *
+ * Identical to rhashtable_remove() but the caller is already aware of the
+ * element in front of the element to be deleted. This is particularly
+ * useful for deletion when combined with walking or lookup.
+ */
+void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
+ struct rhash_head **pprev, gfp_t flags)
+{
+ struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+
+ ASSERT_RHT_MUTEX(ht);
+
+ RCU_INIT_POINTER(*pprev, obj->next);
+ ht->nelems--;
+
+ if (ht->p.shrink_decision &&
+ ht->p.shrink_decision(ht, tbl->size))
+ rhashtable_shrink(ht, flags);
+}
+EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
+
+/**
+ * rhashtable_remove - remove object from hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @flags: allocation flags (table expansion)
+ *
+ * Since the hash chain is singly linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_shrink() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * The caller must ensure that no concurrent table mutations occur. It is
+ * however valid to have concurrent lookups if they are RCU protected.
+ */
+bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
+ gfp_t flags)
+{
+ struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+ struct rhash_head __rcu **pprev;
+ struct rhash_head *he;
+ u32 h;
+
+ ASSERT_RHT_MUTEX(ht);
+
+ h = head_hashfn(ht, obj, tbl->size);
+
+ pprev = &tbl->buckets[h];
+ rht_for_each(he, tbl->buckets[h], ht) {
+ if (he != obj) {
+ pprev = &he->next;
+ continue;
+ }
+
+ rhashtable_remove_pprev(ht, he, pprev, flags);
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(rhashtable_remove);
+
+/**
+ * rhashtable_lookup - lookup key in hash table
+ * @ht: hash table
+ * @key: pointer to key
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * This lookup function may only be used for fixed-key hash tables (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * Lookups may occur in parallel with hash mutations as long as the lookup is
+ * guarded by rcu_read_lock(). The caller must take care of this.
+ */
+void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
+{
+ const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+ struct rhash_head *he;
+ u32 h;
+
+ BUG_ON(!ht->p.key_len);
+
+ h = __hashfn(ht, key, ht->p.key_len, tbl->size);
+ rht_for_each_rcu(he, tbl->buckets[h], ht) {
+ if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
+ ht->p.key_len))
+ continue;
+ return (void *) he - ht->p.head_offset;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup);
+
+/**
+ * rhashtable_lookup_compare - search hash table with compare function
+ * @ht: hash table
+ * @hash: hash value of desired entry
+ * @compare: compare function, must return true on match
+ * @arg: argument passed on to compare function
+ *
+ * Traverses the bucket chain behind the provided hash value and calls the
+ * specified compare function for each entry.
+ *
+ * Lookups may occur in parallel with hash mutations as long as the lookup is
+ * guarded by rcu_read_lock(). The caller must take care of this.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
+ bool (*compare)(void *, void *), void *arg)
+{
+ const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+ struct rhash_head *he;
+
+ if (unlikely(hash >= tbl->size))
+ return NULL;
+
+ rht_for_each_rcu(he, tbl->buckets[hash], ht) {
+ if (!compare(rht_obj(ht, he), arg))
+ continue;
+ return (void *) he - ht->p.head_offset;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
+
+static size_t rounded_hashtable_size(unsigned int nelem)
+{
+ return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE);
+}
+
+/**
+ * rhashtable_init - initialize a new hash table
+ * @ht: hash table to be initialized
+ * @params: configuration parameters
+ *
+ * Initializes a new hash table based on the provided configuration
+ * parameters. A table can be configured either with a variable or
+ * fixed length key:
+ *
+ * Configuration Example 1: Fixed length keys
+ * struct test_obj {
+ * int key;
+ * void * my_member;
+ * struct rhash_head node;
+ * };
+ *
+ * struct rhashtable_params params = {
+ * .head_offset = offsetof(struct test_obj, node),
+ * .key_offset = offsetof(struct test_obj, key),
+ * .key_len = sizeof(int),
+ * .hashfn = arch_fast_hash,
+ * .mutex_is_held = &my_mutex_is_held,
+ * };
+ *
+ * Configuration Example 2: Variable length keys
+ * struct test_obj {
+ * [...]
+ * struct rhash_head node;
+ * };
+ *
+ * u32 my_hash_fn(const void *data, u32 seed)
+ * {
+ * struct test_obj *obj = data;
+ *
+ * return [... hash ...];
+ * }
+ *
+ * struct rhashtable_params params = {
+ * .head_offset = offsetof(struct test_obj, node),
+ * .hashfn = arch_fast_hash,
+ * .obj_hashfn = my_hash_fn,
+ * .mutex_is_held = &my_mutex_is_held,
+ * };
+ */
+int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
+{
+ struct bucket_table *tbl;
+ size_t size;
+
+ size = HASH_DEFAULT_SIZE;
+
+ if ((params->key_len && !params->hashfn) ||
+ (!params->key_len && !params->obj_hashfn))
+ return -EINVAL;
+
+ if (params->nelem_hint)
+ size = rounded_hashtable_size(params->nelem_hint);
+
+ tbl = bucket_table_alloc(size, GFP_KERNEL);
+ if (tbl == NULL)
+ return -ENOMEM;
+
+ memset(ht, 0, sizeof(*ht));
+ ht->shift = ilog2(tbl->size);
+ memcpy(&ht->p, params, sizeof(*params));
+ RCU_INIT_POINTER(ht->tbl, tbl);
+
+ if (!ht->p.hash_rnd)
+ get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_init);
+
+/**
+ * rhashtable_destroy - destroy hash table
+ * @ht: the hash table to destroy
+ *
+ * Frees the bucket array.
+ */
+void rhashtable_destroy(const struct rhashtable *ht)
+{
+ const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+
+ bucket_table_free(tbl);
+}
+EXPORT_SYMBOL_GPL(rhashtable_destroy);
+
+/**************************************************************************
+ * Self Test
+ **************************************************************************/
+
+#ifdef CONFIG_TEST_RHASHTABLE
+
+#define TEST_HT_SIZE 8
+#define TEST_ENTRIES 2048
+#define TEST_PTR ((void *) 0xdeadbeef)
+#define TEST_NEXPANDS 4
+
+static int test_mutex_is_held(void)
+{
+ return 1;
+}
+
+struct test_obj {
+ void *ptr;
+ int value;
+ struct rhash_head node;
+};
+
+static int __init test_rht_lookup(struct rhashtable *ht)
+{
+ unsigned int i;
+
+ for (i = 0; i < TEST_ENTRIES * 2; i++) {
+ struct test_obj *obj;
+ bool expected = !(i % 2);
+ u32 key = i;
+
+ obj = rhashtable_lookup(ht, &key);
+
+ if (expected && !obj) {
+ pr_warn("Test failed: Could not find key %u\n", key);
+ return -ENOENT;
+ } else if (!expected && obj) {
+ pr_warn("Test failed: Unexpected entry found for key %u\n",
+ key);
+ return -EEXIST;
+ } else if (expected && obj) {
+ if (obj->ptr != TEST_PTR || obj->value != i) {
+ pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
+ obj->ptr, TEST_PTR, obj->value, i);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void test_bucket_stats(struct rhashtable *ht,
+ struct bucket_table *tbl,
+ bool quiet)
+{
+ unsigned int cnt, i, total = 0;
+ struct test_obj *obj;
+
+ for (i = 0; i < tbl->size; i++) {
+ cnt = 0;
+
+ if (!quiet)
+ pr_info(" [%#4x/%zu]", i, tbl->size);
+
+ rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
+ cnt++;
+ total++;
+ if (!quiet)
+ pr_cont(" [%p],", obj);
+ }
+
+ if (!quiet)
+ pr_cont("\n [%#x] first element: %p, chain length: %u\n",
+ i, tbl->buckets[i], cnt);
+ }
+
+ pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
+ total, ht->nelems, TEST_ENTRIES);
+}
+
+static int __init test_rhashtable(struct rhashtable *ht)
+{
+ struct bucket_table *tbl;
+ struct test_obj *obj, *next;
+ int err;
+ unsigned int i;
+
+ /*
+ * Insertion Test:
+ * Insert TEST_ENTRIES into table with all keys even numbers
+ */
+ pr_info(" Adding %d keys\n", TEST_ENTRIES);
+ for (i = 0; i < TEST_ENTRIES; i++) {
+ struct test_obj *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ obj->ptr = TEST_PTR;
+ obj->value = i * 2;
+
+ rhashtable_insert(ht, &obj->node, GFP_KERNEL);
+ }
+
+ rcu_read_lock();
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+ test_bucket_stats(ht, tbl, true);
+ test_rht_lookup(ht);
+ rcu_read_unlock();
+
+ for (i = 0; i < TEST_NEXPANDS; i++) {
+ pr_info(" Table expansion iteration %u...\n", i);
+ rhashtable_expand(ht, GFP_KERNEL);
+
+ rcu_read_lock();
+ pr_info(" Verifying lookups...\n");
+ test_rht_lookup(ht);
+ rcu_read_unlock();
+ }
+
+ for (i = 0; i < TEST_NEXPANDS; i++) {
+ pr_info(" Table shrinkage iteration %u...\n", i);
+ rhashtable_shrink(ht, GFP_KERNEL);
+
+ rcu_read_lock();
+ pr_info(" Verifying lookups...\n");
+ test_rht_lookup(ht);
+ rcu_read_unlock();
+ }
+
+ pr_info(" Deleting %d keys\n", TEST_ENTRIES);
+ for (i = 0; i < TEST_ENTRIES; i++) {
+ u32 key = i * 2;
+
+ obj = rhashtable_lookup(ht, &key);
+ BUG_ON(!obj);
+
+ rhashtable_remove(ht, &obj->node, GFP_KERNEL);
+ kfree(obj);
+ }
+
+ return 0;
+
+error:
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+ for (i = 0; i < tbl->size; i++)
+ rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
+ kfree(obj);
+
+ return err;
+}
+
+static int __init test_rht_init(void)
+{
+ struct rhashtable ht;
+ struct rhashtable_params params = {
+ .nelem_hint = TEST_HT_SIZE,
+ .head_offset = offsetof(struct test_obj, node),
+ .key_offset = offsetof(struct test_obj, value),
+ .key_len = sizeof(int),
+ .hashfn = arch_fast_hash,
+ .mutex_is_held = &test_mutex_is_held,
+ .grow_decision = rht_grow_above_75,
+ .shrink_decision = rht_shrink_below_30,
+ };
+ int err;
+
+ pr_info("Running resizable hashtable tests...\n");
+
+ err = rhashtable_init(&ht, &params);
+ if (err < 0) {
+ pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+ err);
+ return err;
+ }
+
+ err = test_rhashtable(&ht);
+
+ rhashtable_destroy(&ht);
+
+ return err;
+}
+
+subsys_initcall(test_rht_init);
+
+#endif /* CONFIG_TEST_RHASHTABLE */
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 3a8e8e8fb2a5..9cdf62f8accd 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -73,7 +73,7 @@ EXPORT_SYMBOL(sg_nents);
**/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
-#ifndef ARCH_HAS_SG_CHAIN
+#ifndef CONFIG_ARCH_HAS_SG_CHAIN
struct scatterlist *ret = &sgl[nents - 1];
#else
struct scatterlist *sg, *ret = NULL;
@@ -165,6 +165,7 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
* __sg_free_table - Free a previously mapped sg table
* @table: The sg table header to use
* @max_ents: The maximum number of entries per single scatterlist
+ * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
* @free_fn: Free function
*
* Description:
@@ -174,7 +175,7 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
*
**/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
- sg_free_fn *free_fn)
+ bool skip_first_chunk, sg_free_fn *free_fn)
{
struct scatterlist *sgl, *next;
@@ -202,7 +203,10 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
}
table->orig_nents -= sg_size;
- free_fn(sgl, alloc_size);
+ if (skip_first_chunk)
+ skip_first_chunk = false;
+ else
+ free_fn(sgl, alloc_size);
sgl = next;
}
@@ -217,7 +221,7 @@ EXPORT_SYMBOL(__sg_free_table);
**/
void sg_free_table(struct sg_table *table)
{
- __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);
@@ -241,8 +245,8 @@ EXPORT_SYMBOL(sg_free_table);
*
**/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
- unsigned int max_ents, gfp_t gfp_mask,
- sg_alloc_fn *alloc_fn)
+ unsigned int max_ents, struct scatterlist *first_chunk,
+ gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
struct scatterlist *sg, *prv;
unsigned int left;
@@ -251,7 +255,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
if (nents == 0)
return -EINVAL;
-#ifndef ARCH_HAS_SG_CHAIN
+#ifndef CONFIG_ARCH_HAS_SG_CHAIN
if (WARN_ON_ONCE(nents > max_ents))
return -EINVAL;
#endif
@@ -269,7 +273,12 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
left -= sg_size;
- sg = alloc_fn(alloc_size, gfp_mask);
+ if (first_chunk) {
+ sg = first_chunk;
+ first_chunk = NULL;
+ } else {
+ sg = alloc_fn(alloc_size, gfp_mask);
+ }
if (unlikely(!sg)) {
/*
* Adjust entry count to reflect that the last
@@ -324,9 +333,9 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
int ret;
ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
- gfp_mask, sg_kmalloc);
+ NULL, gfp_mask, sg_kmalloc);
if (unlikely(ret))
- __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
return ret;
}
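The new first_chunk/skip_first_chunk parameters exist so that a caller can embed the first scatterlist chunk in its own allocation and only hit the allocator for overflow chains. A sketch of the intended pattern, under the assumption that the caller provides its own sg_alloc_fn/sg_free_fn callbacks and sizes the inline array to match max_ents (all names below are illustrative, not part of this patch):

        #include <linux/scatterlist.h>
        #include <linux/slab.h>

        #define MY_INLINE_SGS   8

        struct my_cmd {
                struct sg_table table;
                struct scatterlist inline_sg[MY_INLINE_SGS];    /* preallocated first chunk */
        };

        static struct scatterlist *my_sg_alloc(unsigned int nents, gfp_t gfp)
        {
                return kmalloc_array(nents, sizeof(struct scatterlist), gfp);
        }

        static void my_sg_free(struct scatterlist *sg, unsigned int nents)
        {
                kfree(sg);
        }

        static int my_cmd_map_sg(struct my_cmd *cmd, unsigned int nents)
        {
                /* entries that fit go into inline_sg; overflow is chained via my_sg_alloc() */
                return __sg_alloc_table(&cmd->table, nents, MY_INLINE_SGS,
                                        cmd->inline_sg, GFP_ATOMIC, my_sg_alloc);
        }

        static void my_cmd_unmap_sg(struct my_cmd *cmd)
        {
                /* skip_first_chunk = true: the embedded inline_sg must not be kfree()d */
                __sg_free_table(&cmd->table, MY_INLINE_SGS, true, my_sg_free);
        }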
diff --git a/lib/string.c b/lib/string.c
index 9b1f9062a202..992bf30af759 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -107,7 +107,7 @@ EXPORT_SYMBOL(strcpy);
#ifndef __HAVE_ARCH_STRNCPY
/**
- * strncpy - Copy a length-limited, %NUL-terminated string
+ * strncpy - Copy a length-limited, C-string
* @dest: Where to copy the string to
* @src: Where to copy the string from
* @count: The maximum number of bytes to copy
@@ -136,7 +136,7 @@ EXPORT_SYMBOL(strncpy);
#ifndef __HAVE_ARCH_STRLCPY
/**
- * strlcpy - Copy a %NUL terminated string into a sized buffer
+ * strlcpy - Copy a C-string into a sized buffer
* @dest: Where to copy the string to
* @src: Where to copy the string from
* @size: size of destination buffer
@@ -182,7 +182,7 @@ EXPORT_SYMBOL(strcat);
#ifndef __HAVE_ARCH_STRNCAT
/**
- * strncat - Append a length-limited, %NUL-terminated string to another
+ * strncat - Append a length-limited, C-string to another
* @dest: The string to be appended to
* @src: The string to append to it
* @count: The maximum numbers of bytes to copy
@@ -211,7 +211,7 @@ EXPORT_SYMBOL(strncat);
#ifndef __HAVE_ARCH_STRLCAT
/**
- * strlcat - Append a length-limited, %NUL-terminated string to another
+ * strlcat - Append a length-limited, C-string to another
* @dest: The string to be appended to
* @src: The string to append to it
* @count: The size of the destination buffer.
@@ -301,6 +301,24 @@ char *strchr(const char *s, int c)
EXPORT_SYMBOL(strchr);
#endif
+#ifndef __HAVE_ARCH_STRCHRNUL
+/**
+ * strchrnul - Find and return a character in a string, or end of string
+ * @s: The string to be searched
+ * @c: The character to search for
+ *
+ * Returns a pointer to the first occurrence of 'c' in 's'. If 'c' is not found,
+ * returns a pointer to the null byte at the end of 's'.
+ */
+char *strchrnul(const char *s, int c)
+{
+ while (*s && *s != (char)c)
+ s++;
+ return (char *)s;
+}
+EXPORT_SYMBOL(strchrnul);
+#endif
+
#ifndef __HAVE_ARCH_STRRCHR
/**
* strrchr - Find the last occurrence of a character in a string
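strchrnul() spares callers the usual NULL check when splitting option strings, since the no-match case simply points at the terminator. A small illustrative use (the option string is made up):

        const char *opt = "mode=fast";
        const char *eq = strchrnul(opt, '=');
        size_t keylen = eq - opt;               /* 4, the length of "mode" */
        const char *val = *eq ? eq + 1 : "";    /* "fast", or "" when there is no '=' */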
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index ed5c1454dd62..29033f319aea 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -25,12 +25,15 @@
int string_get_size(u64 size, const enum string_size_units units,
char *buf, int len)
{
- static const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB",
- "EB", "ZB", "YB", NULL};
- static const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
- "EiB", "ZiB", "YiB", NULL };
- static const char **units_str[] = {
- [STRING_UNITS_10] = units_10,
+ static const char *const units_10[] = {
+ "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", NULL
+ };
+ static const char *const units_2[] = {
+ "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB",
+ NULL
+ };
+ static const char *const *const units_str[] = {
+ [STRING_UNITS_10] = units_10,
[STRING_UNITS_2] = units_2,
};
static const unsigned int divisor[] = {
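The constification above does not change what string_get_size() produces; for reference, a typical call looks like this (the formatted strings are approximate):

        char buf[16];

        string_get_size(8192, STRING_UNITS_2, buf, sizeof(buf));   /* roughly "8.00 KiB" */
        string_get_size(8192, STRING_UNITS_10, buf, sizeof(buf));  /* roughly "8.19 kB"  */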
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index b604b831f4d1..4abda074ea45 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -86,6 +86,7 @@ static unsigned int io_tlb_index;
* We need to save away the original address corresponding to a mapped entry
* for the sync operations.
*/
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
static phys_addr_t *io_tlb_orig_addr;
/*
@@ -188,12 +189,14 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
io_tlb_list = memblock_virt_alloc(
PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
PAGE_SIZE);
- for (i = 0; i < io_tlb_nslabs; i++)
- io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
- io_tlb_index = 0;
io_tlb_orig_addr = memblock_virt_alloc(
PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
PAGE_SIZE);
+ for (i = 0; i < io_tlb_nslabs; i++) {
+ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+ io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+ }
+ io_tlb_index = 0;
if (verbose)
swiotlb_print_info();
@@ -313,10 +316,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
if (!io_tlb_list)
goto cleanup3;
- for (i = 0; i < io_tlb_nslabs; i++)
- io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
- io_tlb_index = 0;
-
io_tlb_orig_addr = (phys_addr_t *)
__get_free_pages(GFP_KERNEL,
get_order(io_tlb_nslabs *
@@ -324,7 +323,11 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
if (!io_tlb_orig_addr)
goto cleanup4;
- memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
+ for (i = 0; i < io_tlb_nslabs; i++) {
+ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+ io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+ }
+ io_tlb_index = 0;
swiotlb_print_info();
@@ -374,7 +377,7 @@ void __init swiotlb_free(void)
io_tlb_nslabs = 0;
}
-static int is_swiotlb_buffer(phys_addr_t paddr)
+int is_swiotlb_buffer(phys_addr_t paddr)
{
return paddr >= io_tlb_start && paddr < io_tlb_end;
}
@@ -556,7 +559,8 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
/*
* First, sync the memory before unmapping the entry
*/
- if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+ if (orig_addr != INVALID_PHYS_ADDR &&
+ ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
/*
@@ -573,8 +577,10 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
* Step 1: return the slots to the free list, merging the
* slots with superceeding slots
*/
- for (i = index + nslots - 1; i >= index; i--)
+ for (i = index + nslots - 1; i >= index; i--) {
io_tlb_list[i] = ++count;
+ io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+ }
/*
* Step 2: merge the returned slots with the preceding slots,
* if available (non zero)
@@ -593,6 +599,8 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
phys_addr_t orig_addr = io_tlb_orig_addr[index];
+ if (orig_addr == INVALID_PHYS_ADDR)
+ return;
orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
switch (target) {
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c
index bea3f3fa3f02..4137bca5f8e8 100644
--- a/lib/test-kstrtox.c
+++ b/lib/test-kstrtox.c
@@ -3,7 +3,7 @@
#include <linux/module.h>
#define for_each_test(i, test) \
- for (i = 0; i < sizeof(test) / sizeof(test[0]); i++)
+ for (i = 0; i < ARRAY_SIZE(test); i++)
struct test_fail {
const char *str;
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
new file mode 100644
index 000000000000..89e0345733bd
--- /dev/null
+++ b/lib/test_bpf.c
@@ -0,0 +1,1929 @@
+/*
+ * Testsuite for BPF interpreter and BPF JIT compiler
+ *
+ * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/filter.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+
+/* General test specific settings */
+#define MAX_SUBTESTS 3
+#define MAX_TESTRUNS 10000
+#define MAX_DATA 128
+#define MAX_INSNS 512
+#define MAX_K 0xffffFFFF
+
+/* Few constants used to init test 'skb' */
+#define SKB_TYPE 3
+#define SKB_MARK 0x1234aaaa
+#define SKB_HASH 0x1234aaab
+#define SKB_QUEUE_MAP 123
+#define SKB_VLAN_TCI 0xffff
+#define SKB_DEV_IFINDEX 577
+#define SKB_DEV_TYPE 588
+
+/* Redefine REGs to make tests less verbose */
+#define R0 BPF_REG_0
+#define R1 BPF_REG_1
+#define R2 BPF_REG_2
+#define R3 BPF_REG_3
+#define R4 BPF_REG_4
+#define R5 BPF_REG_5
+#define R6 BPF_REG_6
+#define R7 BPF_REG_7
+#define R8 BPF_REG_8
+#define R9 BPF_REG_9
+#define R10 BPF_REG_10
+
+/* Flags that can be passed to test cases */
+#define FLAG_NO_DATA BIT(0)
+#define FLAG_EXPECTED_FAIL BIT(1)
+
+enum {
+ CLASSIC = BIT(6), /* Old BPF instructions only. */
+ INTERNAL = BIT(7), /* Extended instruction set. */
+};
+
+#define TEST_TYPE_MASK (CLASSIC | INTERNAL)
+
+struct bpf_test {
+ const char *descr;
+ union {
+ struct sock_filter insns[MAX_INSNS];
+ struct bpf_insn insns_int[MAX_INSNS];
+ } u;
+ __u8 aux;
+ __u8 data[MAX_DATA];
+ struct {
+ int data_size;
+ __u32 result;
+ } test[MAX_SUBTESTS];
+};
+
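Each entry in test[] below is a (data_size, result) pair: the runner later in this file builds a test skb of data_size bytes from data[] (unless FLAG_NO_DATA is set), executes the filter repeatedly (up to MAX_TESTRUNS, mainly for timing), and checks the program's return value against result. In the first "TAX" case, for example, { 2, 10 } means "with a 2-byte packet the program must return 10".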
+static struct bpf_test tests[] = {
+ {
+ "TAX",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_LEN, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */
+ BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 10, 20, 30, 40, 50 },
+ { { 2, 10 }, { 3, 20 }, { 4, 30 } },
+ },
+ {
+ "TXA",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */
+ },
+ CLASSIC,
+ { 10, 20, 30, 40, 50 },
+ { { 1, 2 }, { 3, 6 }, { 4, 8 } },
+ },
+ {
+ "ADD_SUB_MUL_K",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 1),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2),
+ BPF_STMT(BPF_LDX | BPF_IMM, 3),
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff),
+ BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0xfffffffd } }
+ },
+ {
+ "DIV_KX",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 8),
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0x40000001 } }
+ },
+ {
+ "AND_OR_LSH_K",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xff),
+ BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
+ BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xf),
+ BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0x800000ff }, { 1, 0x800000ff } },
+ },
+ {
+ "LD_IMM_0",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0), /* ld #0 */
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ },
+ CLASSIC,
+ { },
+ { { 1, 1 } },
+ },
+ {
+ "LD_IND",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K),
+ BPF_STMT(BPF_RET | BPF_K, 1)
+ },
+ CLASSIC,
+ { },
+ { { 1, 0 }, { 10, 0 }, { 60, 0 } },
+ },
+ {
+ "LD_ABS",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000),
+ BPF_STMT(BPF_RET | BPF_K, 1)
+ },
+ CLASSIC,
+ { },
+ { { 1, 0 }, { 10, 0 }, { 60, 0 } },
+ },
+ {
+ "LD_ABS_LL",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 1, 2, 3 },
+ { { 1, 0 }, { 2, 3 } },
+ },
+ {
+ "LD_IND_LL",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1),
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 1, 2, 3, 0xff },
+ { { 1, 1 }, { 3, 3 }, { 4, 0xff } },
+ },
+ {
+ "LD_ABS_NET",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
+ { { 15, 0 }, { 16, 3 } },
+ },
+ {
+ "LD_IND_NET",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15),
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
+ { { 14, 0 }, { 15, 1 }, { 17, 3 } },
+ },
+ {
+ "LD_PKTTYPE",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PKTTYPE),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PKTTYPE),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PKTTYPE),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, 3 }, { 10, 3 } },
+ },
+ {
+ "LD_MARK",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_MARK),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, SKB_MARK}, { 10, SKB_MARK} },
+ },
+ {
+ "LD_RXHASH",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_RXHASH),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, SKB_HASH}, { 10, SKB_HASH} },
+ },
+ {
+ "LD_QUEUE",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_QUEUE),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } },
+ },
+ {
+ "LD_PROTOCOL",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PROTOCOL),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 10, 20, 30 },
+ { { 10, ETH_P_IP }, { 100, ETH_P_IP } },
+ },
+ {
+ "LD_VLAN_TAG",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_VLAN_TAG),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ {
+ { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT },
+ { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }
+ },
+ },
+ {
+ "LD_VLAN_TAG_PRESENT",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ {
+ { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
+ { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+ },
+ },
+ {
+ "LD_IFINDEX",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_IFINDEX),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } },
+ },
+ {
+ "LD_HATYPE",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_HATYPE),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } },
+ },
+ {
+ "LD_CPU",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_CPU),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_CPU),
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, 0 }, { 10, 0 } },
+ },
+ {
+ "LD_NLATTR",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 2),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_LDX | BPF_IMM, 3),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+#ifdef __BIG_ENDIAN
+ { 0xff, 0xff, 0, 4, 0, 2, 0, 4, 0, 3 },
+#else
+ { 0xff, 0xff, 4, 0, 2, 0, 4, 0, 3, 0 },
+#endif
+ { { 4, 0 }, { 20, 6 } },
+ },
+ {
+ "LD_NLATTR_NEST",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LDX | BPF_IMM, 3),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+#ifdef __BIG_ENDIAN
+ { 0xff, 0xff, 0, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3 },
+#else
+ { 0xff, 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 },
+#endif
+ { { 4, 0 }, { 20, 10 } },
+ },
+ {
+ "LD_PAYLOAD_OFF",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ /* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethertype IPv4 (0x0800),
+ * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request,
+ * id 9737, seq 1, length 64
+ */
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40,
+ 0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 },
+ { { 30, 0 }, { 100, 42 } },
+ },
+ {
+ "LD_ANC_XOR",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 10),
+ BPF_STMT(BPF_LDX | BPF_IMM, 300),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_ALU_XOR_X),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 4, 10 ^ 300 }, { 20, 10 ^ 300 } },
+ },
+ {
+ "SPILL_FILL",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_ALU | BPF_RSH, 1),
+ BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+ BPF_STMT(BPF_ST, 1), /* M1 = 1 ^ len */
+ BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000),
+ BPF_STMT(BPF_ST, 2), /* M2 = 1 ^ len ^ 0x80000000 */
+ BPF_STMT(BPF_STX, 15), /* M3 = len */
+ BPF_STMT(BPF_LDX | BPF_MEM, 1),
+ BPF_STMT(BPF_LD | BPF_MEM, 2),
+ BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 15),
+ BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } }
+ },
+ {
+ "JEQ",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 3, 3, 3, 3, 3 },
+ { { 1, 0 }, { 3, 1 }, { 4, MAX_K } },
+ },
+ {
+ "JGT",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+ BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 4, 4, 4, 3, 3 },
+ { { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
+ },
+ {
+ "JGE",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 10),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 20),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 40),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 1, 2, 3, 4, 5 },
+ { { 1, 20 }, { 3, 40 }, { 5, MAX_K } },
+ },
+ {
+ "JSET",
+ .u.insns = {
+ BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+ BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1),
+ BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+ BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 10),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 20),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 0, 0xAA, 0x55, 1 },
+ { { 4, 10 }, { 5, 20 }, { 6, MAX_K } },
+ },
+ {
+ "tcpdump port 22",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 0, 8), /* IPv6 */
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 20),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 17),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 54),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 14, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 56),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 12, 13),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12), /* IPv4 */
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 8),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 6, 0),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
+ BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 0xffff),
+ BPF_STMT(BPF_RET | BPF_K, 0),
+ },
+ CLASSIC,
+ /* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4(0x0800)
+ * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.],
+ * seq 1305692979:1305693027, ack 3650467037, win 65535,
+ * options [nop,nop,TS val 2502645400 ecr 3971138], length 48
+ */
+ { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+ 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
+ 0x08, 0x00,
+ 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
+ 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
+ 0x0a, 0x01, 0x01, 0x95, /* ip src */
+ 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
+ 0xc2, 0x24,
+ 0x00, 0x16 /* dst port */ },
+ { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
+ },
+ {
+ "tcpdump complex",
+ .u.insns = {
+ /* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] -
+ * ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and
+ * (len > 115 or len < 30000000000)' -d
+ */
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 30, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x800, 0, 29),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 0, 27),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 25, 0),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
+ BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 20),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 16),
+ BPF_STMT(BPF_ST, 1),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 14),
+ BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf),
+ BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 2),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0x5), /* libpcap emits K on TAX */
+ BPF_STMT(BPF_LD | BPF_MEM, 1),
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+ BPF_STMT(BPF_ST, 5),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
+ BPF_STMT(BPF_LD | BPF_B | BPF_IND, 26),
+ BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
+ BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 2),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0x9), /* libpcap emits K on TAX */
+ BPF_STMT(BPF_LD | BPF_MEM, 5),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 4, 0),
+ BPF_STMT(BPF_LD | BPF_LEN, 0),
+ BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0x73, 1, 0),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 0xfc23ac00, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0xffff),
+ BPF_STMT(BPF_RET | BPF_K, 0),
+ },
+ CLASSIC,
+ { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+ 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
+ 0x08, 0x00,
+ 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
+ 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
+ 0x0a, 0x01, 0x01, 0x95, /* ip src */
+ 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
+ 0xc2, 0x24,
+ 0x00, 0x16 /* dst port */ },
+ { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
+ },
+ {
+ "RET_A",
+ .u.insns = {
+ /* check that uninitialized X and A contain zeros */
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { {1, 0}, {2, 0} },
+ },
+ {
+ "INT: ADD trivial",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_ADD, R1, 2),
+ BPF_ALU64_IMM(BPF_MOV, R2, 3),
+ BPF_ALU64_REG(BPF_SUB, R1, R2),
+ BPF_ALU64_IMM(BPF_ADD, R1, -1),
+ BPF_ALU64_IMM(BPF_MUL, R1, 3),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffd } }
+ },
+ {
+ "INT: MUL_X",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_ALU64_IMM(BPF_MOV, R1, -1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 3),
+ BPF_ALU64_REG(BPF_MUL, R1, R2),
+ BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "INT: MUL_X2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ BPF_ALU32_IMM(BPF_MOV, R1, -1),
+ BPF_ALU32_IMM(BPF_MOV, R2, 3),
+ BPF_ALU64_REG(BPF_MUL, R1, R2),
+ BPF_ALU64_IMM(BPF_RSH, R1, 8),
+ BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "INT: MUL32_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ BPF_ALU64_IMM(BPF_MOV, R1, -1),
+ BPF_ALU32_IMM(BPF_MOV, R2, 3),
+ BPF_ALU32_REG(BPF_MUL, R1, R2),
+ BPF_ALU64_IMM(BPF_RSH, R1, 8),
+ BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ /* Have to test all register combinations, since
+ * JITing of different registers will produce
+ * different asm code.
+ */
+ "INT: ADD 64-bit",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_IMM(BPF_MOV, R3, 3),
+ BPF_ALU64_IMM(BPF_MOV, R4, 4),
+ BPF_ALU64_IMM(BPF_MOV, R5, 5),
+ BPF_ALU64_IMM(BPF_MOV, R6, 6),
+ BPF_ALU64_IMM(BPF_MOV, R7, 7),
+ BPF_ALU64_IMM(BPF_MOV, R8, 8),
+ BPF_ALU64_IMM(BPF_MOV, R9, 9),
+ BPF_ALU64_IMM(BPF_ADD, R0, 20),
+ BPF_ALU64_IMM(BPF_ADD, R1, 20),
+ BPF_ALU64_IMM(BPF_ADD, R2, 20),
+ BPF_ALU64_IMM(BPF_ADD, R3, 20),
+ BPF_ALU64_IMM(BPF_ADD, R4, 20),
+ BPF_ALU64_IMM(BPF_ADD, R5, 20),
+ BPF_ALU64_IMM(BPF_ADD, R6, 20),
+ BPF_ALU64_IMM(BPF_ADD, R7, 20),
+ BPF_ALU64_IMM(BPF_ADD, R8, 20),
+ BPF_ALU64_IMM(BPF_ADD, R9, 20),
+ BPF_ALU64_IMM(BPF_SUB, R0, 10),
+ BPF_ALU64_IMM(BPF_SUB, R1, 10),
+ BPF_ALU64_IMM(BPF_SUB, R2, 10),
+ BPF_ALU64_IMM(BPF_SUB, R3, 10),
+ BPF_ALU64_IMM(BPF_SUB, R4, 10),
+ BPF_ALU64_IMM(BPF_SUB, R5, 10),
+ BPF_ALU64_IMM(BPF_SUB, R6, 10),
+ BPF_ALU64_IMM(BPF_SUB, R7, 10),
+ BPF_ALU64_IMM(BPF_SUB, R8, 10),
+ BPF_ALU64_IMM(BPF_SUB, R9, 10),
+ BPF_ALU64_REG(BPF_ADD, R0, R0),
+ BPF_ALU64_REG(BPF_ADD, R0, R1),
+ BPF_ALU64_REG(BPF_ADD, R0, R2),
+ BPF_ALU64_REG(BPF_ADD, R0, R3),
+ BPF_ALU64_REG(BPF_ADD, R0, R4),
+ BPF_ALU64_REG(BPF_ADD, R0, R5),
+ BPF_ALU64_REG(BPF_ADD, R0, R6),
+ BPF_ALU64_REG(BPF_ADD, R0, R7),
+ BPF_ALU64_REG(BPF_ADD, R0, R8),
+ BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */
+ BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R1, R0),
+ BPF_ALU64_REG(BPF_ADD, R1, R1),
+ BPF_ALU64_REG(BPF_ADD, R1, R2),
+ BPF_ALU64_REG(BPF_ADD, R1, R3),
+ BPF_ALU64_REG(BPF_ADD, R1, R4),
+ BPF_ALU64_REG(BPF_ADD, R1, R5),
+ BPF_ALU64_REG(BPF_ADD, R1, R6),
+ BPF_ALU64_REG(BPF_ADD, R1, R7),
+ BPF_ALU64_REG(BPF_ADD, R1, R8),
+ BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */
+ BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R2, R0),
+ BPF_ALU64_REG(BPF_ADD, R2, R1),
+ BPF_ALU64_REG(BPF_ADD, R2, R2),
+ BPF_ALU64_REG(BPF_ADD, R2, R3),
+ BPF_ALU64_REG(BPF_ADD, R2, R4),
+ BPF_ALU64_REG(BPF_ADD, R2, R5),
+ BPF_ALU64_REG(BPF_ADD, R2, R6),
+ BPF_ALU64_REG(BPF_ADD, R2, R7),
+ BPF_ALU64_REG(BPF_ADD, R2, R8),
+ BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
+ BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R3, R0),
+ BPF_ALU64_REG(BPF_ADD, R3, R1),
+ BPF_ALU64_REG(BPF_ADD, R3, R2),
+ BPF_ALU64_REG(BPF_ADD, R3, R3),
+ BPF_ALU64_REG(BPF_ADD, R3, R4),
+ BPF_ALU64_REG(BPF_ADD, R3, R5),
+ BPF_ALU64_REG(BPF_ADD, R3, R6),
+ BPF_ALU64_REG(BPF_ADD, R3, R7),
+ BPF_ALU64_REG(BPF_ADD, R3, R8),
+ BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
+ BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R4, R0),
+ BPF_ALU64_REG(BPF_ADD, R4, R1),
+ BPF_ALU64_REG(BPF_ADD, R4, R2),
+ BPF_ALU64_REG(BPF_ADD, R4, R3),
+ BPF_ALU64_REG(BPF_ADD, R4, R4),
+ BPF_ALU64_REG(BPF_ADD, R4, R5),
+ BPF_ALU64_REG(BPF_ADD, R4, R6),
+ BPF_ALU64_REG(BPF_ADD, R4, R7),
+ BPF_ALU64_REG(BPF_ADD, R4, R8),
+ BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R5, R0),
+ BPF_ALU64_REG(BPF_ADD, R5, R1),
+ BPF_ALU64_REG(BPF_ADD, R5, R2),
+ BPF_ALU64_REG(BPF_ADD, R5, R3),
+ BPF_ALU64_REG(BPF_ADD, R5, R4),
+ BPF_ALU64_REG(BPF_ADD, R5, R5),
+ BPF_ALU64_REG(BPF_ADD, R5, R6),
+ BPF_ALU64_REG(BPF_ADD, R5, R7),
+ BPF_ALU64_REG(BPF_ADD, R5, R8),
+ BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
+ BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R6, R0),
+ BPF_ALU64_REG(BPF_ADD, R6, R1),
+ BPF_ALU64_REG(BPF_ADD, R6, R2),
+ BPF_ALU64_REG(BPF_ADD, R6, R3),
+ BPF_ALU64_REG(BPF_ADD, R6, R4),
+ BPF_ALU64_REG(BPF_ADD, R6, R5),
+ BPF_ALU64_REG(BPF_ADD, R6, R6),
+ BPF_ALU64_REG(BPF_ADD, R6, R7),
+ BPF_ALU64_REG(BPF_ADD, R6, R8),
+ BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
+ BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R7, R0),
+ BPF_ALU64_REG(BPF_ADD, R7, R1),
+ BPF_ALU64_REG(BPF_ADD, R7, R2),
+ BPF_ALU64_REG(BPF_ADD, R7, R3),
+ BPF_ALU64_REG(BPF_ADD, R7, R4),
+ BPF_ALU64_REG(BPF_ADD, R7, R5),
+ BPF_ALU64_REG(BPF_ADD, R7, R6),
+ BPF_ALU64_REG(BPF_ADD, R7, R7),
+ BPF_ALU64_REG(BPF_ADD, R7, R8),
+ BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
+ BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R8, R0),
+ BPF_ALU64_REG(BPF_ADD, R8, R1),
+ BPF_ALU64_REG(BPF_ADD, R8, R2),
+ BPF_ALU64_REG(BPF_ADD, R8, R3),
+ BPF_ALU64_REG(BPF_ADD, R8, R4),
+ BPF_ALU64_REG(BPF_ADD, R8, R5),
+ BPF_ALU64_REG(BPF_ADD, R8, R6),
+ BPF_ALU64_REG(BPF_ADD, R8, R7),
+ BPF_ALU64_REG(BPF_ADD, R8, R8),
+ BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
+ BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R9, R0),
+ BPF_ALU64_REG(BPF_ADD, R9, R1),
+ BPF_ALU64_REG(BPF_ADD, R9, R2),
+ BPF_ALU64_REG(BPF_ADD, R9, R3),
+ BPF_ALU64_REG(BPF_ADD, R9, R4),
+ BPF_ALU64_REG(BPF_ADD, R9, R5),
+ BPF_ALU64_REG(BPF_ADD, R9, R6),
+ BPF_ALU64_REG(BPF_ADD, R9, R7),
+ BPF_ALU64_REG(BPF_ADD, R9, R8),
+ BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
+ BPF_ALU64_REG(BPF_MOV, R0, R9),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2957380 } }
+ },
+ {
+ "INT: ADD 32-bit",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 20),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R2, 2),
+ BPF_ALU32_IMM(BPF_MOV, R3, 3),
+ BPF_ALU32_IMM(BPF_MOV, R4, 4),
+ BPF_ALU32_IMM(BPF_MOV, R5, 5),
+ BPF_ALU32_IMM(BPF_MOV, R6, 6),
+ BPF_ALU32_IMM(BPF_MOV, R7, 7),
+ BPF_ALU32_IMM(BPF_MOV, R8, 8),
+ BPF_ALU32_IMM(BPF_MOV, R9, 9),
+ BPF_ALU64_IMM(BPF_ADD, R1, 10),
+ BPF_ALU64_IMM(BPF_ADD, R2, 10),
+ BPF_ALU64_IMM(BPF_ADD, R3, 10),
+ BPF_ALU64_IMM(BPF_ADD, R4, 10),
+ BPF_ALU64_IMM(BPF_ADD, R5, 10),
+ BPF_ALU64_IMM(BPF_ADD, R6, 10),
+ BPF_ALU64_IMM(BPF_ADD, R7, 10),
+ BPF_ALU64_IMM(BPF_ADD, R8, 10),
+ BPF_ALU64_IMM(BPF_ADD, R9, 10),
+ BPF_ALU32_REG(BPF_ADD, R0, R1),
+ BPF_ALU32_REG(BPF_ADD, R0, R2),
+ BPF_ALU32_REG(BPF_ADD, R0, R3),
+ BPF_ALU32_REG(BPF_ADD, R0, R4),
+ BPF_ALU32_REG(BPF_ADD, R0, R5),
+ BPF_ALU32_REG(BPF_ADD, R0, R6),
+ BPF_ALU32_REG(BPF_ADD, R0, R7),
+ BPF_ALU32_REG(BPF_ADD, R0, R8),
+ BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */
+ BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R1, R0),
+ BPF_ALU32_REG(BPF_ADD, R1, R1),
+ BPF_ALU32_REG(BPF_ADD, R1, R2),
+ BPF_ALU32_REG(BPF_ADD, R1, R3),
+ BPF_ALU32_REG(BPF_ADD, R1, R4),
+ BPF_ALU32_REG(BPF_ADD, R1, R5),
+ BPF_ALU32_REG(BPF_ADD, R1, R6),
+ BPF_ALU32_REG(BPF_ADD, R1, R7),
+ BPF_ALU32_REG(BPF_ADD, R1, R8),
+ BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */
+ BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R2, R0),
+ BPF_ALU32_REG(BPF_ADD, R2, R1),
+ BPF_ALU32_REG(BPF_ADD, R2, R2),
+ BPF_ALU32_REG(BPF_ADD, R2, R3),
+ BPF_ALU32_REG(BPF_ADD, R2, R4),
+ BPF_ALU32_REG(BPF_ADD, R2, R5),
+ BPF_ALU32_REG(BPF_ADD, R2, R6),
+ BPF_ALU32_REG(BPF_ADD, R2, R7),
+ BPF_ALU32_REG(BPF_ADD, R2, R8),
+ BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
+ BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R3, R0),
+ BPF_ALU32_REG(BPF_ADD, R3, R1),
+ BPF_ALU32_REG(BPF_ADD, R3, R2),
+ BPF_ALU32_REG(BPF_ADD, R3, R3),
+ BPF_ALU32_REG(BPF_ADD, R3, R4),
+ BPF_ALU32_REG(BPF_ADD, R3, R5),
+ BPF_ALU32_REG(BPF_ADD, R3, R6),
+ BPF_ALU32_REG(BPF_ADD, R3, R7),
+ BPF_ALU32_REG(BPF_ADD, R3, R8),
+ BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
+ BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R4, R0),
+ BPF_ALU32_REG(BPF_ADD, R4, R1),
+ BPF_ALU32_REG(BPF_ADD, R4, R2),
+ BPF_ALU32_REG(BPF_ADD, R4, R3),
+ BPF_ALU32_REG(BPF_ADD, R4, R4),
+ BPF_ALU32_REG(BPF_ADD, R4, R5),
+ BPF_ALU32_REG(BPF_ADD, R4, R6),
+ BPF_ALU32_REG(BPF_ADD, R4, R7),
+ BPF_ALU32_REG(BPF_ADD, R4, R8),
+ BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R5, R0),
+ BPF_ALU32_REG(BPF_ADD, R5, R1),
+ BPF_ALU32_REG(BPF_ADD, R5, R2),
+ BPF_ALU32_REG(BPF_ADD, R5, R3),
+ BPF_ALU32_REG(BPF_ADD, R5, R4),
+ BPF_ALU32_REG(BPF_ADD, R5, R5),
+ BPF_ALU32_REG(BPF_ADD, R5, R6),
+ BPF_ALU32_REG(BPF_ADD, R5, R7),
+ BPF_ALU32_REG(BPF_ADD, R5, R8),
+ BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
+ BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R6, R0),
+ BPF_ALU32_REG(BPF_ADD, R6, R1),
+ BPF_ALU32_REG(BPF_ADD, R6, R2),
+ BPF_ALU32_REG(BPF_ADD, R6, R3),
+ BPF_ALU32_REG(BPF_ADD, R6, R4),
+ BPF_ALU32_REG(BPF_ADD, R6, R5),
+ BPF_ALU32_REG(BPF_ADD, R6, R6),
+ BPF_ALU32_REG(BPF_ADD, R6, R7),
+ BPF_ALU32_REG(BPF_ADD, R6, R8),
+ BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
+ BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R7, R0),
+ BPF_ALU32_REG(BPF_ADD, R7, R1),
+ BPF_ALU32_REG(BPF_ADD, R7, R2),
+ BPF_ALU32_REG(BPF_ADD, R7, R3),
+ BPF_ALU32_REG(BPF_ADD, R7, R4),
+ BPF_ALU32_REG(BPF_ADD, R7, R5),
+ BPF_ALU32_REG(BPF_ADD, R7, R6),
+ BPF_ALU32_REG(BPF_ADD, R7, R7),
+ BPF_ALU32_REG(BPF_ADD, R7, R8),
+ BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
+ BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R8, R0),
+ BPF_ALU32_REG(BPF_ADD, R8, R1),
+ BPF_ALU32_REG(BPF_ADD, R8, R2),
+ BPF_ALU32_REG(BPF_ADD, R8, R3),
+ BPF_ALU32_REG(BPF_ADD, R8, R4),
+ BPF_ALU32_REG(BPF_ADD, R8, R5),
+ BPF_ALU32_REG(BPF_ADD, R8, R6),
+ BPF_ALU32_REG(BPF_ADD, R8, R7),
+ BPF_ALU32_REG(BPF_ADD, R8, R8),
+ BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
+ BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R9, R0),
+ BPF_ALU32_REG(BPF_ADD, R9, R1),
+ BPF_ALU32_REG(BPF_ADD, R9, R2),
+ BPF_ALU32_REG(BPF_ADD, R9, R3),
+ BPF_ALU32_REG(BPF_ADD, R9, R4),
+ BPF_ALU32_REG(BPF_ADD, R9, R5),
+ BPF_ALU32_REG(BPF_ADD, R9, R6),
+ BPF_ALU32_REG(BPF_ADD, R9, R7),
+ BPF_ALU32_REG(BPF_ADD, R9, R8),
+ BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
+ BPF_ALU32_REG(BPF_MOV, R0, R9),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2957380 } }
+ },
+ { /* Mainly checking JIT here. */
+ "INT: SUB",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_IMM(BPF_MOV, R3, 3),
+ BPF_ALU64_IMM(BPF_MOV, R4, 4),
+ BPF_ALU64_IMM(BPF_MOV, R5, 5),
+ BPF_ALU64_IMM(BPF_MOV, R6, 6),
+ BPF_ALU64_IMM(BPF_MOV, R7, 7),
+ BPF_ALU64_IMM(BPF_MOV, R8, 8),
+ BPF_ALU64_IMM(BPF_MOV, R9, 9),
+ BPF_ALU64_REG(BPF_SUB, R0, R0),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_ALU64_REG(BPF_SUB, R0, R2),
+ BPF_ALU64_REG(BPF_SUB, R0, R3),
+ BPF_ALU64_REG(BPF_SUB, R0, R4),
+ BPF_ALU64_REG(BPF_SUB, R0, R5),
+ BPF_ALU64_REG(BPF_SUB, R0, R6),
+ BPF_ALU64_REG(BPF_SUB, R0, R7),
+ BPF_ALU64_REG(BPF_SUB, R0, R8),
+ BPF_ALU64_REG(BPF_SUB, R0, R9),
+ BPF_ALU64_IMM(BPF_SUB, R0, 10),
+ BPF_JMP_IMM(BPF_JEQ, R0, -55, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R1, R0),
+ BPF_ALU64_REG(BPF_SUB, R1, R2),
+ BPF_ALU64_REG(BPF_SUB, R1, R3),
+ BPF_ALU64_REG(BPF_SUB, R1, R4),
+ BPF_ALU64_REG(BPF_SUB, R1, R5),
+ BPF_ALU64_REG(BPF_SUB, R1, R6),
+ BPF_ALU64_REG(BPF_SUB, R1, R7),
+ BPF_ALU64_REG(BPF_SUB, R1, R8),
+ BPF_ALU64_REG(BPF_SUB, R1, R9),
+ BPF_ALU64_IMM(BPF_SUB, R1, 10),
+ BPF_ALU64_REG(BPF_SUB, R2, R0),
+ BPF_ALU64_REG(BPF_SUB, R2, R1),
+ BPF_ALU64_REG(BPF_SUB, R2, R3),
+ BPF_ALU64_REG(BPF_SUB, R2, R4),
+ BPF_ALU64_REG(BPF_SUB, R2, R5),
+ BPF_ALU64_REG(BPF_SUB, R2, R6),
+ BPF_ALU64_REG(BPF_SUB, R2, R7),
+ BPF_ALU64_REG(BPF_SUB, R2, R8),
+ BPF_ALU64_REG(BPF_SUB, R2, R9),
+ BPF_ALU64_IMM(BPF_SUB, R2, 10),
+ BPF_ALU64_REG(BPF_SUB, R3, R0),
+ BPF_ALU64_REG(BPF_SUB, R3, R1),
+ BPF_ALU64_REG(BPF_SUB, R3, R2),
+ BPF_ALU64_REG(BPF_SUB, R3, R4),
+ BPF_ALU64_REG(BPF_SUB, R3, R5),
+ BPF_ALU64_REG(BPF_SUB, R3, R6),
+ BPF_ALU64_REG(BPF_SUB, R3, R7),
+ BPF_ALU64_REG(BPF_SUB, R3, R8),
+ BPF_ALU64_REG(BPF_SUB, R3, R9),
+ BPF_ALU64_IMM(BPF_SUB, R3, 10),
+ BPF_ALU64_REG(BPF_SUB, R4, R0),
+ BPF_ALU64_REG(BPF_SUB, R4, R1),
+ BPF_ALU64_REG(BPF_SUB, R4, R2),
+ BPF_ALU64_REG(BPF_SUB, R4, R3),
+ BPF_ALU64_REG(BPF_SUB, R4, R5),
+ BPF_ALU64_REG(BPF_SUB, R4, R6),
+ BPF_ALU64_REG(BPF_SUB, R4, R7),
+ BPF_ALU64_REG(BPF_SUB, R4, R8),
+ BPF_ALU64_REG(BPF_SUB, R4, R9),
+ BPF_ALU64_IMM(BPF_SUB, R4, 10),
+ BPF_ALU64_REG(BPF_SUB, R5, R0),
+ BPF_ALU64_REG(BPF_SUB, R5, R1),
+ BPF_ALU64_REG(BPF_SUB, R5, R2),
+ BPF_ALU64_REG(BPF_SUB, R5, R3),
+ BPF_ALU64_REG(BPF_SUB, R5, R4),
+ BPF_ALU64_REG(BPF_SUB, R5, R6),
+ BPF_ALU64_REG(BPF_SUB, R5, R7),
+ BPF_ALU64_REG(BPF_SUB, R5, R8),
+ BPF_ALU64_REG(BPF_SUB, R5, R9),
+ BPF_ALU64_IMM(BPF_SUB, R5, 10),
+ BPF_ALU64_REG(BPF_SUB, R6, R0),
+ BPF_ALU64_REG(BPF_SUB, R6, R1),
+ BPF_ALU64_REG(BPF_SUB, R6, R2),
+ BPF_ALU64_REG(BPF_SUB, R6, R3),
+ BPF_ALU64_REG(BPF_SUB, R6, R4),
+ BPF_ALU64_REG(BPF_SUB, R6, R5),
+ BPF_ALU64_REG(BPF_SUB, R6, R7),
+ BPF_ALU64_REG(BPF_SUB, R6, R8),
+ BPF_ALU64_REG(BPF_SUB, R6, R9),
+ BPF_ALU64_IMM(BPF_SUB, R6, 10),
+ BPF_ALU64_REG(BPF_SUB, R7, R0),
+ BPF_ALU64_REG(BPF_SUB, R7, R1),
+ BPF_ALU64_REG(BPF_SUB, R7, R2),
+ BPF_ALU64_REG(BPF_SUB, R7, R3),
+ BPF_ALU64_REG(BPF_SUB, R7, R4),
+ BPF_ALU64_REG(BPF_SUB, R7, R5),
+ BPF_ALU64_REG(BPF_SUB, R7, R6),
+ BPF_ALU64_REG(BPF_SUB, R7, R8),
+ BPF_ALU64_REG(BPF_SUB, R7, R9),
+ BPF_ALU64_IMM(BPF_SUB, R7, 10),
+ BPF_ALU64_REG(BPF_SUB, R8, R0),
+ BPF_ALU64_REG(BPF_SUB, R8, R1),
+ BPF_ALU64_REG(BPF_SUB, R8, R2),
+ BPF_ALU64_REG(BPF_SUB, R8, R3),
+ BPF_ALU64_REG(BPF_SUB, R8, R4),
+ BPF_ALU64_REG(BPF_SUB, R8, R5),
+ BPF_ALU64_REG(BPF_SUB, R8, R6),
+ BPF_ALU64_REG(BPF_SUB, R8, R7),
+ BPF_ALU64_REG(BPF_SUB, R8, R9),
+ BPF_ALU64_IMM(BPF_SUB, R8, 10),
+ BPF_ALU64_REG(BPF_SUB, R9, R0),
+ BPF_ALU64_REG(BPF_SUB, R9, R1),
+ BPF_ALU64_REG(BPF_SUB, R9, R2),
+ BPF_ALU64_REG(BPF_SUB, R9, R3),
+ BPF_ALU64_REG(BPF_SUB, R9, R4),
+ BPF_ALU64_REG(BPF_SUB, R9, R5),
+ BPF_ALU64_REG(BPF_SUB, R9, R6),
+ BPF_ALU64_REG(BPF_SUB, R9, R7),
+ BPF_ALU64_REG(BPF_SUB, R9, R8),
+ BPF_ALU64_IMM(BPF_SUB, R9, 10),
+ BPF_ALU64_IMM(BPF_SUB, R0, 10),
+ BPF_ALU64_IMM(BPF_NEG, R0, 0),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_ALU64_REG(BPF_SUB, R0, R2),
+ BPF_ALU64_REG(BPF_SUB, R0, R3),
+ BPF_ALU64_REG(BPF_SUB, R0, R4),
+ BPF_ALU64_REG(BPF_SUB, R0, R5),
+ BPF_ALU64_REG(BPF_SUB, R0, R6),
+ BPF_ALU64_REG(BPF_SUB, R0, R7),
+ BPF_ALU64_REG(BPF_SUB, R0, R8),
+ BPF_ALU64_REG(BPF_SUB, R0, R9),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 11 } }
+ },
+ { /* Mainly checking JIT here. */
+ "INT: XOR",
+ .u.insns_int = {
+ BPF_ALU64_REG(BPF_SUB, R0, R0),
+ BPF_ALU64_REG(BPF_XOR, R1, R1),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R0, 10),
+ BPF_ALU64_IMM(BPF_MOV, R1, -1),
+ BPF_ALU64_REG(BPF_SUB, R1, R1),
+ BPF_ALU64_REG(BPF_XOR, R2, R2),
+ BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R2, R2),
+ BPF_ALU64_REG(BPF_XOR, R3, R3),
+ BPF_ALU64_IMM(BPF_MOV, R0, 10),
+ BPF_ALU64_IMM(BPF_MOV, R1, -1),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R3, R3),
+ BPF_ALU64_REG(BPF_XOR, R4, R4),
+ BPF_ALU64_IMM(BPF_MOV, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R5, -1),
+ BPF_JMP_REG(BPF_JEQ, R3, R4, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R4, R4),
+ BPF_ALU64_REG(BPF_XOR, R5, R5),
+ BPF_ALU64_IMM(BPF_MOV, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R7, -1),
+ BPF_JMP_REG(BPF_JEQ, R5, R4, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R5, 1),
+ BPF_ALU64_REG(BPF_SUB, R5, R5),
+ BPF_ALU64_REG(BPF_XOR, R6, R6),
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R8, -1),
+ BPF_JMP_REG(BPF_JEQ, R5, R6, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R6, R6),
+ BPF_ALU64_REG(BPF_XOR, R7, R7),
+ BPF_JMP_REG(BPF_JEQ, R7, R6, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R7, R7),
+ BPF_ALU64_REG(BPF_XOR, R8, R8),
+ BPF_JMP_REG(BPF_JEQ, R7, R8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R8, R8),
+ BPF_ALU64_REG(BPF_XOR, R9, R9),
+ BPF_JMP_REG(BPF_JEQ, R9, R8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R9, R9),
+ BPF_ALU64_REG(BPF_XOR, R0, R0),
+ BPF_JMP_REG(BPF_JEQ, R9, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R1, R1),
+ BPF_ALU64_REG(BPF_XOR, R0, R0),
+ BPF_JMP_REG(BPF_JEQ, R9, R0, 2),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ { /* Mainly checking JIT here. */
+ "INT: MUL",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 11),
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_IMM(BPF_MOV, R3, 3),
+ BPF_ALU64_IMM(BPF_MOV, R4, 4),
+ BPF_ALU64_IMM(BPF_MOV, R5, 5),
+ BPF_ALU64_IMM(BPF_MOV, R6, 6),
+ BPF_ALU64_IMM(BPF_MOV, R7, 7),
+ BPF_ALU64_IMM(BPF_MOV, R8, 8),
+ BPF_ALU64_IMM(BPF_MOV, R9, 9),
+ BPF_ALU64_REG(BPF_MUL, R0, R0),
+ BPF_ALU64_REG(BPF_MUL, R0, R1),
+ BPF_ALU64_REG(BPF_MUL, R0, R2),
+ BPF_ALU64_REG(BPF_MUL, R0, R3),
+ BPF_ALU64_REG(BPF_MUL, R0, R4),
+ BPF_ALU64_REG(BPF_MUL, R0, R5),
+ BPF_ALU64_REG(BPF_MUL, R0, R6),
+ BPF_ALU64_REG(BPF_MUL, R0, R7),
+ BPF_ALU64_REG(BPF_MUL, R0, R8),
+ BPF_ALU64_REG(BPF_MUL, R0, R9),
+ BPF_ALU64_IMM(BPF_MUL, R0, 10),
+ BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_MUL, R1, R0),
+ BPF_ALU64_REG(BPF_MUL, R1, R2),
+ BPF_ALU64_REG(BPF_MUL, R1, R3),
+ BPF_ALU64_REG(BPF_MUL, R1, R4),
+ BPF_ALU64_REG(BPF_MUL, R1, R5),
+ BPF_ALU64_REG(BPF_MUL, R1, R6),
+ BPF_ALU64_REG(BPF_MUL, R1, R7),
+ BPF_ALU64_REG(BPF_MUL, R1, R8),
+ BPF_ALU64_REG(BPF_MUL, R1, R9),
+ BPF_ALU64_IMM(BPF_MUL, R1, 10),
+ BPF_ALU64_REG(BPF_MOV, R2, R1),
+ BPF_ALU64_IMM(BPF_RSH, R2, 32),
+ BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_LSH, R1, 32),
+ BPF_ALU64_IMM(BPF_ARSH, R1, 32),
+ BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_MUL, R2, R0),
+ BPF_ALU64_REG(BPF_MUL, R2, R1),
+ BPF_ALU64_REG(BPF_MUL, R2, R3),
+ BPF_ALU64_REG(BPF_MUL, R2, R4),
+ BPF_ALU64_REG(BPF_MUL, R2, R5),
+ BPF_ALU64_REG(BPF_MUL, R2, R6),
+ BPF_ALU64_REG(BPF_MUL, R2, R7),
+ BPF_ALU64_REG(BPF_MUL, R2, R8),
+ BPF_ALU64_REG(BPF_MUL, R2, R9),
+ BPF_ALU64_IMM(BPF_MUL, R2, 10),
+ BPF_ALU64_IMM(BPF_RSH, R2, 32),
+ BPF_ALU64_REG(BPF_MOV, R0, R2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x35d97ef2 } }
+ },
+ {
+ "INT: ALU MIX",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 11),
+ BPF_ALU64_IMM(BPF_ADD, R0, -1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_IMM(BPF_XOR, R2, 3),
+ BPF_ALU64_REG(BPF_DIV, R0, R2),
+ BPF_JMP_IMM(BPF_JEQ, R0, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOD, R0, 3),
+ BPF_JMP_IMM(BPF_JEQ, R0, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
+ "INT: DIV + ABS",
+ .u.insns_int = {
+ BPF_ALU64_REG(BPF_MOV, R6, R1),
+ BPF_LD_ABS(BPF_B, 3),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU32_REG(BPF_DIV, R0, R2),
+ BPF_ALU64_REG(BPF_MOV, R8, R0),
+ BPF_LD_ABS(BPF_B, 4),
+ BPF_ALU64_REG(BPF_ADD, R8, R0),
+ BPF_LD_IND(BPF_B, R8, -70),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { 10, 20, 30, 40, 50 },
+ { { 4, 0 }, { 5, 10 } }
+ },
+ {
+ "INT: DIV by zero",
+ .u.insns_int = {
+ BPF_ALU64_REG(BPF_MOV, R6, R1),
+ BPF_ALU64_IMM(BPF_MOV, R7, 0),
+ BPF_LD_ABS(BPF_B, 3),
+ BPF_ALU32_REG(BPF_DIV, R0, R7),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { 10, 20, 30, 40, 50 },
+ { { 3, 0 }, { 4, 0 } }
+ },
+ {
+ "check: missing ret",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 1),
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { }
+ },
+ {
+ "check: div_k_0",
+ .u.insns = {
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0)
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { }
+ },
+ {
+ "check: unknown insn",
+ .u.insns = {
+ /* seccomp insn, rejected in socket filter */
+ BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0)
+ },
+ CLASSIC | FLAG_EXPECTED_FAIL,
+ { },
+ { }
+ },
+ {
+ "check: out of range spill/fill",
+ .u.insns = {
+ BPF_STMT(BPF_STX, 16),
+ BPF_STMT(BPF_RET | BPF_K, 0)
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { }
+ },
+ {
+ "JUMPS + HOLES",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
+ BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
+ BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0),
+ },
+ CLASSIC,
+ { 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8,
+ 0x90, 0xe2, 0xba, 0x0a, 0x56, 0xb4,
+ 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x28, 0x00, 0x00,
+ 0x20, 0x00, 0x40, 0x11, 0x00, 0x00, /* IP header */
+ 0xc0, 0xa8, 0x33, 0x01,
+ 0xc0, 0xa8, 0x33, 0x02,
+ 0xbb, 0xb6,
+ 0xa9, 0xfa,
+ 0x00, 0x14, 0x00, 0x00,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc },
+ { { 88, 0x001b } }
+ },
+ {
+ "check: RET X",
+ .u.insns = {
+ BPF_STMT(BPF_RET | BPF_X, 0),
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { },
+ },
+ {
+ "check: LDX + RET X",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 42),
+ BPF_STMT(BPF_RET | BPF_X, 0),
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { },
+ },
+ { /* Mainly checking JIT here. */
+ "M[]: alt STX + LDX",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 100),
+ BPF_STMT(BPF_STX, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 0),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 1),
+ BPF_STMT(BPF_LDX | BPF_MEM, 1),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 2),
+ BPF_STMT(BPF_LDX | BPF_MEM, 2),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 3),
+ BPF_STMT(BPF_LDX | BPF_MEM, 3),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 4),
+ BPF_STMT(BPF_LDX | BPF_MEM, 4),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 5),
+ BPF_STMT(BPF_LDX | BPF_MEM, 5),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 6),
+ BPF_STMT(BPF_LDX | BPF_MEM, 6),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 7),
+ BPF_STMT(BPF_LDX | BPF_MEM, 7),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 8),
+ BPF_STMT(BPF_LDX | BPF_MEM, 8),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 9),
+ BPF_STMT(BPF_LDX | BPF_MEM, 9),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 10),
+ BPF_STMT(BPF_LDX | BPF_MEM, 10),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 11),
+ BPF_STMT(BPF_LDX | BPF_MEM, 11),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 12),
+ BPF_STMT(BPF_LDX | BPF_MEM, 12),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 13),
+ BPF_STMT(BPF_LDX | BPF_MEM, 13),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 14),
+ BPF_STMT(BPF_LDX | BPF_MEM, 14),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 15),
+ BPF_STMT(BPF_LDX | BPF_MEM, 15),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 116 } },
+ },
+ { /* Mainly checking JIT here. */
+ "M[]: full STX + full LDX",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb),
+ BPF_STMT(BPF_STX, 0),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae),
+ BPF_STMT(BPF_STX, 1),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf),
+ BPF_STMT(BPF_STX, 2),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc),
+ BPF_STMT(BPF_STX, 3),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb),
+ BPF_STMT(BPF_STX, 4),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda),
+ BPF_STMT(BPF_STX, 5),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb),
+ BPF_STMT(BPF_STX, 6),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade),
+ BPF_STMT(BPF_STX, 7),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec),
+ BPF_STMT(BPF_STX, 8),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc),
+ BPF_STMT(BPF_STX, 9),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac),
+ BPF_STMT(BPF_STX, 10),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea),
+ BPF_STMT(BPF_STX, 11),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb),
+ BPF_STMT(BPF_STX, 12),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf),
+ BPF_STMT(BPF_STX, 13),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde),
+ BPF_STMT(BPF_STX, 14),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad),
+ BPF_STMT(BPF_STX, 15),
+ BPF_STMT(BPF_LDX | BPF_MEM, 0),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 1),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 2),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 3),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 4),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 5),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 6),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 7),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 8),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 9),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 10),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 11),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 12),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 13),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 14),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 15),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0x2a5a5e5 } },
+ },
+ {
+ "check: SKF_AD_MAX",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_MAX),
+ BPF_STMT(BPF_RET | BPF_A, 0),
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { },
+ },
+ { /* Passes checker but fails during runtime. */
+ "LD [SKF_AD_OFF-1]",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF - 1),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ },
+ CLASSIC,
+ { },
+ { { 1, 0 } },
+ },
+};
+
+static struct net_device dev;
+
+static struct sk_buff *populate_skb(char *buf, int size)
+{
+ struct sk_buff *skb;
+
+ if (size >= MAX_DATA)
+ return NULL;
+
+ skb = alloc_skb(MAX_DATA, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ memcpy(__skb_put(skb, size), buf, size);
+
+ /* Initialize a fake skb with test pattern. */
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IP);
+ skb->pkt_type = SKB_TYPE;
+ skb->mark = SKB_MARK;
+ skb->hash = SKB_HASH;
+ skb->queue_mapping = SKB_QUEUE_MAP;
+ skb->vlan_tci = SKB_VLAN_TCI;
+ skb->dev = &dev;
+ skb->dev->ifindex = SKB_DEV_IFINDEX;
+ skb->dev->type = SKB_DEV_TYPE;
+ skb_set_network_header(skb, min(size, ETH_HLEN));
+
+ return skb;
+}
+
+static void *generate_test_data(struct bpf_test *test, int sub)
+{
+ if (test->aux & FLAG_NO_DATA)
+ return NULL;
+
+ /* Test case expects an skb, so populate one. Various
+ * subtests generate skbs of different sizes based on
+ * the same data.
+ */
+ return populate_skb(test->data, test->test[sub].data_size);
+}
+
+static void release_test_data(const struct bpf_test *test, void *data)
+{
+ if (test->aux & FLAG_NO_DATA)
+ return;
+
+ kfree_skb(data);
+}
+
+static int probe_filter_length(struct sock_filter *fp)
+{
+ int len = 0;
+
+ for (len = MAX_INSNS - 1; len > 0; --len)
+ if (fp[len].code != 0 || fp[len].k != 0)
+ break;
+
+ return len + 1;
+}
+
+static struct bpf_prog *generate_filter(int which, int *err)
+{
+ struct bpf_prog *fp;
+ struct sock_fprog_kern fprog;
+ unsigned int flen = probe_filter_length(tests[which].u.insns);
+ __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
+
+ switch (test_type) {
+ case CLASSIC:
+ fprog.filter = tests[which].u.insns;
+ fprog.len = flen;
+
+ *err = bpf_prog_create(&fp, &fprog);
+ if (tests[which].aux & FLAG_EXPECTED_FAIL) {
+ if (*err == -EINVAL) {
+ pr_cont("PASS\n");
+ /* Verifier rejected filter as expected. */
+ *err = 0;
+ return NULL;
+ } else {
+ pr_cont("UNEXPECTED_PASS\n");
+ /* Verifier didn't reject the test; that's
+ * bad enough, just return.
+ */
+ *err = -EINVAL;
+ return NULL;
+ }
+ }
+ /* We don't expect to fail. */
+ if (*err) {
+ pr_cont("FAIL to attach err=%d len=%d\n",
+ *err, fprog.len);
+ return NULL;
+ }
+ break;
+
+ case INTERNAL:
+ fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
+ if (fp == NULL) {
+ pr_cont("UNEXPECTED_FAIL no memory left\n");
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ fp->len = flen;
+ memcpy(fp->insnsi, tests[which].u.insns_int,
+ fp->len * sizeof(struct bpf_insn));
+
+ bpf_prog_select_runtime(fp);
+ break;
+ }
+
+ *err = 0;
+ return fp;
+}
+
+static void release_filter(struct bpf_prog *fp, int which)
+{
+ __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
+
+ switch (test_type) {
+ case CLASSIC:
+ bpf_prog_destroy(fp);
+ break;
+ case INTERNAL:
+ bpf_prog_free(fp);
+ break;
+ }
+}
+
+static int __run_one(const struct bpf_prog *fp, const void *data,
+ int runs, u64 *duration)
+{
+ u64 start, finish;
+ int ret, i;
+
+ start = ktime_to_us(ktime_get());
+
+ for (i = 0; i < runs; i++)
+ ret = BPF_PROG_RUN(fp, data);
+
+ finish = ktime_to_us(ktime_get());
+
+ *duration = (finish - start) * 1000ULL;
+ do_div(*duration, runs);
+
+ return ret;
+}
+
+static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
+{
+ int err_cnt = 0, i, runs = MAX_TESTRUNS;
+
+ for (i = 0; i < MAX_SUBTESTS; i++) {
+ void *data;
+ u64 duration;
+ u32 ret;
+
+ if (test->test[i].data_size == 0 &&
+ test->test[i].result == 0)
+ break;
+
+ data = generate_test_data(test, i);
+ ret = __run_one(fp, data, runs, &duration);
+ release_test_data(test, data);
+
+ if (ret == test->test[i].result) {
+ pr_cont("%lld ", duration);
+ } else {
+ pr_cont("ret %d != %d ", ret,
+ test->test[i].result);
+ err_cnt++;
+ }
+ }
+
+ return err_cnt;
+}
+
+static __init int test_bpf(void)
+{
+ int i, err_cnt = 0, pass_cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ struct bpf_prog *fp;
+ int err;
+
+ pr_info("#%d %s ", i, tests[i].descr);
+
+ fp = generate_filter(i, &err);
+ if (fp == NULL) {
+ if (err == 0) {
+ pass_cnt++;
+ continue;
+ }
+
+ return err;
+ }
+ err = run_one(fp, &tests[i]);
+ release_filter(fp, i);
+
+ if (err) {
+ pr_cont("FAIL (%d times)\n", err);
+ err_cnt++;
+ } else {
+ pr_cont("PASS\n");
+ pass_cnt++;
+ }
+ }
+
+ pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
+ return err_cnt ? -EINVAL : 0;
+}
+
+static int __init test_bpf_init(void)
+{
+ return test_bpf();
+}
+
+static void __exit test_bpf_exit(void)
+{
+}
+
+module_init(test_bpf_init);
+module_exit(test_bpf_exit);
+
+MODULE_LICENSE("GPL");
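
The tests[] table and the harness functions above make extending coverage mostly a matter of appending one more initializer. Below is a minimal sketch of what such an entry could look like; it is illustrative only, not part of the patch, and relies solely on the field layout visible in the initializers above (the struct bpf_test definition itself appears earlier in the file, outside this excerpt).

	/* Hypothetical entry: load the second packet byte and return it. */
	{
		"EXAMPLE: LD second byte",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1),
			BPF_STMT(BPF_RET | BPF_A, 0),
		},
		CLASSIC,
		{ 0x11, 0x22, 0x33 },
		{ { 3, 0x22 } },	/* one subtest: 3-byte skb, expect 0x22 */
	},

For such an entry, generate_test_data() wraps the three data bytes in an skb and run_one() compares the program's return value against 0x22; the all-zero sentinel after the single subtest ends the subtest loop.
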
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
new file mode 100644
index 000000000000..86374c1c49a4
--- /dev/null
+++ b/lib/test_firmware.c
@@ -0,0 +1,117 @@
+/*
+ * This module provides an interface to trigger and test firmware loading.
+ *
+ * It is designed to be used for basic evaluation of the firmware loading
+ * subsystem (for example when validating firmware verification). It lacks
+ * any extra dependencies, and will not normally be loaded by the system
+ * unless explicitly requested by name.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/firmware.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+static DEFINE_MUTEX(test_fw_mutex);
+static const struct firmware *test_firmware;
+
+static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
+ size_t size, loff_t *offset)
+{
+ ssize_t rc = 0;
+
+ mutex_lock(&test_fw_mutex);
+ if (test_firmware)
+ rc = simple_read_from_buffer(buf, size, offset,
+ test_firmware->data,
+ test_firmware->size);
+ mutex_unlock(&test_fw_mutex);
+ return rc;
+}
+
+static const struct file_operations test_fw_fops = {
+ .owner = THIS_MODULE,
+ .read = test_fw_misc_read,
+};
+
+static struct miscdevice test_fw_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "test_firmware",
+ .fops = &test_fw_fops,
+};
+
+static ssize_t trigger_request_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ char *name;
+
+ name = kzalloc(count + 1, GFP_KERNEL);
+ if (!name)
+ return -ENOSPC;
+ memcpy(name, buf, count);
+
+ pr_info("loading '%s'\n", name);
+
+ mutex_lock(&test_fw_mutex);
+ release_firmware(test_firmware);
+ test_firmware = NULL;
+ rc = request_firmware(&test_firmware, name, dev);
+ if (rc)
+ pr_info("load of '%s' failed: %d\n", name, rc);
+ pr_info("loaded: %zu\n", test_firmware ? test_firmware->size : 0);
+ mutex_unlock(&test_fw_mutex);
+
+ kfree(name);
+
+ return count;
+}
+static DEVICE_ATTR_WO(trigger_request);
+
+static int __init test_firmware_init(void)
+{
+ int rc;
+
+ rc = misc_register(&test_fw_misc_device);
+ if (rc) {
+ pr_err("could not register misc device: %d\n", rc);
+ return rc;
+ }
+ rc = device_create_file(test_fw_misc_device.this_device,
+ &dev_attr_trigger_request);
+ if (rc) {
+ pr_err("could not create sysfs interface: %d\n", rc);
+ goto dereg;
+ }
+
+ pr_warn("interface ready\n");
+
+ return 0;
+dereg:
+ misc_deregister(&test_fw_misc_device);
+ return rc;
+}
+
+module_init(test_firmware_init);
+
+static void __exit test_firmware_exit(void)
+{
+ release_firmware(test_firmware);
+ device_remove_file(test_fw_misc_device.this_device,
+ &dev_attr_trigger_request);
+ misc_deregister(&test_fw_misc_device);
+ pr_warn("removed interface\n");
+}
+
+module_exit(test_firmware_exit);
+
+MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
+MODULE_LICENSE("GPL");
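
The module above exposes two user-visible pieces: a write-only sysfs attribute, trigger_request, which calls request_firmware() for whatever name is written to it, and the misc character device /dev/test_firmware, which reads back the blob that was loaded. A rough user-space sketch of driving it follows; the sysfs path is an assumption based on where misc-device attributes usually land (it may differ per system), and "test-firmware.bin" is a placeholder firmware name.

	/* Hypothetical user-space exerciser for the module above. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *trigger =
			"/sys/devices/virtual/misc/test_firmware/trigger_request";
		const char *name = "test-firmware.bin";	/* placeholder */
		char buf[4096];
		ssize_t n;
		int fd;

		/* Ask the module to load the named firmware. */
		fd = open(trigger, O_WRONLY);
		if (fd < 0 || write(fd, name, strlen(name)) < 0) {
			perror("trigger_request");
			return 1;
		}
		close(fd);

		/* Dump whatever blob the module now holds. */
		fd = open("/dev/test_firmware", O_RDONLY);
		if (fd < 0) {
			perror("/dev/test_firmware");
			return 1;
		}
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		close(fd);
		return 0;
	}
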
diff --git a/lib/textsearch.c b/lib/textsearch.c
index e0cc0146ae62..0c7e9ab2d88f 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -159,6 +159,7 @@ errout:
spin_unlock(&ts_mod_lock);
return err;
}
+EXPORT_SYMBOL(textsearch_register);
/**
* textsearch_unregister - unregister a textsearch module
@@ -190,6 +191,7 @@ out:
spin_unlock(&ts_mod_lock);
return err;
}
+EXPORT_SYMBOL(textsearch_unregister);
struct ts_linear_state
{
@@ -236,6 +238,7 @@ unsigned int textsearch_find_continuous(struct ts_config *conf,
return textsearch_find(conf, state);
}
+EXPORT_SYMBOL(textsearch_find_continuous);
/**
* textsearch_prepare - Prepare a search
@@ -298,6 +301,7 @@ errout:
return ERR_PTR(err);
}
+EXPORT_SYMBOL(textsearch_prepare);
/**
* textsearch_destroy - destroy a search configuration
@@ -316,9 +320,4 @@ void textsearch_destroy(struct ts_config *conf)
kfree(conf);
}
-
-EXPORT_SYMBOL(textsearch_register);
-EXPORT_SYMBOL(textsearch_unregister);
-EXPORT_SYMBOL(textsearch_prepare);
-EXPORT_SYMBOL(textsearch_find_continuous);
EXPORT_SYMBOL(textsearch_destroy);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0648291cdafe..6fe2c84eb055 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -2347,7 +2347,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
break;
base = 10;
- is_sign = 0;
+ is_sign = false;
switch (*fmt++) {
case 'c':
@@ -2386,7 +2386,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
case 'i':
base = 0;
case 'd':
- is_sign = 1;
+ is_sign = true;
case 'u':
break;
case '%':
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index 08837db52d94..12d2d777f36b 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -9,33 +9,33 @@ config XZ_DEC
if XZ_DEC
config XZ_DEC_X86
- bool "x86 BCJ filter decoder"
- default y if X86
+ bool "x86 BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
config XZ_DEC_POWERPC
- bool "PowerPC BCJ filter decoder"
- default y if PPC
+ bool "PowerPC BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
config XZ_DEC_IA64
- bool "IA-64 BCJ filter decoder"
- default y if IA64
+ bool "IA-64 BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
config XZ_DEC_ARM
- bool "ARM BCJ filter decoder"
- default y if ARM
+ bool "ARM BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
config XZ_DEC_ARMTHUMB
- bool "ARM-Thumb BCJ filter decoder"
- default y if (ARM && ARM_THUMB)
+ bool "ARM-Thumb BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
config XZ_DEC_SPARC
- bool "SPARC BCJ filter decoder"
- default y if SPARC
+ bool "SPARC BCJ filter decoder" if EXPERT
+ default y
select XZ_DEC_BCJ
endif
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index a6cdc969ea42..08c3c8049998 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -1043,6 +1043,8 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
s->lzma2.sequence = SEQ_LZMA_PREPARE;
+ /* Fall through */
+
case SEQ_LZMA_PREPARE:
if (s->lzma2.compressed < RC_INIT_BYTES)
return XZ_DATA_ERROR;
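
This hunk and the next change no decoder behaviour: both merely mark the existing fall-through between consecutive states of the LZMA2 state machine as intentional, which documents the intent for readers and static checkers. A generic illustration of the pattern (names invented, not taken from the xz code):

	switch (seq) {
	case SEQ_FIRST:
		do_setup();
		seq = SEQ_SECOND;
		/* Fall through - handle the next state in the same call */
	case SEQ_SECOND:
		do_work();
		break;
	}
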
@@ -1053,6 +1055,8 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
s->lzma2.compressed -= RC_INIT_BYTES;
s->lzma2.sequence = SEQ_LZMA_RUN;
+ /* Fall through */
+
case SEQ_LZMA_RUN:
/*
* Set dictionary limit to indicate how much we want
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index d63381e8e333..d20ef458f137 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -250,52 +250,6 @@ int zlib_deflateInit2(
}
/* ========================================================================= */
-#if 0
-int zlib_deflateSetDictionary(
- z_streamp strm,
- const Byte *dictionary,
- uInt dictLength
-)
-{
- deflate_state *s;
- uInt length = dictLength;
- uInt n;
- IPos hash_head = 0;
-
- if (strm == NULL || strm->state == NULL || dictionary == NULL)
- return Z_STREAM_ERROR;
-
- s = (deflate_state *) strm->state;
- if (s->status != INIT_STATE) return Z_STREAM_ERROR;
-
- strm->adler = zlib_adler32(strm->adler, dictionary, dictLength);
-
- if (length < MIN_MATCH) return Z_OK;
- if (length > MAX_DIST(s)) {
- length = MAX_DIST(s);
-#ifndef USE_DICT_HEAD
- dictionary += dictLength - length; /* use the tail of the dictionary */
-#endif
- }
- memcpy((char *)s->window, dictionary, length);
- s->strstart = length;
- s->block_start = (long)length;
-
- /* Insert all strings in the hash table (except for the last two bytes).
- * s->lookahead stays null, so s->ins_h will be recomputed at the next
- * call of fill_window.
- */
- s->ins_h = s->window[0];
- UPDATE_HASH(s, s->ins_h, s->window[1]);
- for (n = 0; n <= length - MIN_MATCH; n++) {
- INSERT_STRING(s, n, hash_head);
- }
- if (hash_head) hash_head = 0; /* to make compiler happy */
- return Z_OK;
-}
-#endif /* 0 */
-
-/* ========================================================================= */
int zlib_deflateReset(
z_streamp strm
)
@@ -326,45 +280,6 @@ int zlib_deflateReset(
return Z_OK;
}
-/* ========================================================================= */
-#if 0
-int zlib_deflateParams(
- z_streamp strm,
- int level,
- int strategy
-)
-{
- deflate_state *s;
- compress_func func;
- int err = Z_OK;
-
- if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
- s = (deflate_state *) strm->state;
-
- if (level == Z_DEFAULT_COMPRESSION) {
- level = 6;
- }
- if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
- return Z_STREAM_ERROR;
- }
- func = configuration_table[s->level].func;
-
- if (func != configuration_table[level].func && strm->total_in != 0) {
- /* Flush the last buffer: */
- err = zlib_deflate(strm, Z_PARTIAL_FLUSH);
- }
- if (s->level != level) {
- s->level = level;
- s->max_lazy_match = configuration_table[level].max_lazy;
- s->good_match = configuration_table[level].good_length;
- s->nice_match = configuration_table[level].nice_length;
- s->max_chain_length = configuration_table[level].max_chain;
- }
- s->strategy = strategy;
- return err;
-}
-#endif /* 0 */
-
/* =========================================================================
* Put a short in the pending buffer. The 16-bit value is put in MSB order.
* IN assertion: the stream state is correct and there is enough room in
@@ -568,64 +483,6 @@ int zlib_deflateEnd(
return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
}
-/* =========================================================================
- * Copy the source state to the destination state.
- */
-#if 0
-int zlib_deflateCopy (
- z_streamp dest,
- z_streamp source
-)
-{
-#ifdef MAXSEG_64K
- return Z_STREAM_ERROR;
-#else
- deflate_state *ds;
- deflate_state *ss;
- ush *overlay;
- deflate_workspace *mem;
-
-
- if (source == NULL || dest == NULL || source->state == NULL) {
- return Z_STREAM_ERROR;
- }
-
- ss = (deflate_state *) source->state;
-
- *dest = *source;
-
- mem = (deflate_workspace *) dest->workspace;
-
- ds = &(mem->deflate_memory);
-
- dest->state = (struct internal_state *) ds;
- *ds = *ss;
- ds->strm = dest;
-
- ds->window = (Byte *) mem->window_memory;
- ds->prev = (Pos *) mem->prev_memory;
- ds->head = (Pos *) mem->head_memory;
- overlay = (ush *) mem->overlay_memory;
- ds->pending_buf = (uch *) overlay;
-
- memcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
- memcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
- memcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
- memcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
-
- ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
- ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
- ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
-
- ds->l_desc.dyn_tree = ds->dyn_ltree;
- ds->d_desc.dyn_tree = ds->dyn_dtree;
- ds->bl_desc.dyn_tree = ds->bl_tree;
-
- return Z_OK;
-#endif
-}
-#endif /* 0 */
-
/* ===========================================================================
* Read a new buffer from the current input stream, update the adler32
* and total number of bytes read. All deflate() input goes through
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index f5ce87b0800e..58a733b10387 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -45,21 +45,6 @@ int zlib_inflateReset(z_streamp strm)
return Z_OK;
}
-#if 0
-int zlib_inflatePrime(z_streamp strm, int bits, int value)
-{
- struct inflate_state *state;
-
- if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
- state = (struct inflate_state *)strm->state;
- if (bits > 16 || state->bits + bits > 32) return Z_STREAM_ERROR;
- value &= (1L << bits) - 1;
- state->hold += value << state->bits;
- state->bits += bits;
- return Z_OK;
-}
-#endif
-
int zlib_inflateInit2(z_streamp strm, int windowBits)
{
struct inflate_state *state;
@@ -761,123 +746,6 @@ int zlib_inflateEnd(z_streamp strm)
return Z_OK;
}
-#if 0
-int zlib_inflateSetDictionary(z_streamp strm, const Byte *dictionary,
- uInt dictLength)
-{
- struct inflate_state *state;
- unsigned long id;
-
- /* check state */
- if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
- state = (struct inflate_state *)strm->state;
- if (state->wrap != 0 && state->mode != DICT)
- return Z_STREAM_ERROR;
-
- /* check for correct dictionary id */
- if (state->mode == DICT) {
- id = zlib_adler32(0L, NULL, 0);
- id = zlib_adler32(id, dictionary, dictLength);
- if (id != state->check)
- return Z_DATA_ERROR;
- }
-
- /* copy dictionary to window */
- zlib_updatewindow(strm, strm->avail_out);
-
- if (dictLength > state->wsize) {
- memcpy(state->window, dictionary + dictLength - state->wsize,
- state->wsize);
- state->whave = state->wsize;
- }
- else {
- memcpy(state->window + state->wsize - dictLength, dictionary,
- dictLength);
- state->whave = dictLength;
- }
- state->havedict = 1;
- return Z_OK;
-}
-#endif
-
-#if 0
-/*
- Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff. Return when found
- or when out of input. When called, *have is the number of pattern bytes
- found in order so far, in 0..3. On return *have is updated to the new
- state. If on return *have equals four, then the pattern was found and the
- return value is how many bytes were read including the last byte of the
- pattern. If *have is less than four, then the pattern has not been found
- yet and the return value is len. In the latter case, zlib_syncsearch() can be
- called again with more data and the *have state. *have is initialized to
- zero for the first call.
- */
-static unsigned zlib_syncsearch(unsigned *have, unsigned char *buf,
- unsigned len)
-{
- unsigned got;
- unsigned next;
-
- got = *have;
- next = 0;
- while (next < len && got < 4) {
- if ((int)(buf[next]) == (got < 2 ? 0 : 0xff))
- got++;
- else if (buf[next])
- got = 0;
- else
- got = 4 - got;
- next++;
- }
- *have = got;
- return next;
-}
-#endif
-
-#if 0
-int zlib_inflateSync(z_streamp strm)
-{
- unsigned len; /* number of bytes to look at or looked at */
- unsigned long in, out; /* temporary to save total_in and total_out */
- unsigned char buf[4]; /* to restore bit buffer to byte string */
- struct inflate_state *state;
-
- /* check parameters */
- if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
- state = (struct inflate_state *)strm->state;
- if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR;
-
- /* if first time, start search in bit buffer */
- if (state->mode != SYNC) {
- state->mode = SYNC;
- state->hold <<= state->bits & 7;
- state->bits -= state->bits & 7;
- len = 0;
- while (state->bits >= 8) {
- buf[len++] = (unsigned char)(state->hold);
- state->hold >>= 8;
- state->bits -= 8;
- }
- state->have = 0;
- zlib_syncsearch(&(state->have), buf, len);
- }
-
- /* search available input */
- len = zlib_syncsearch(&(state->have), strm->next_in, strm->avail_in);
- strm->avail_in -= len;
- strm->next_in += len;
- strm->total_in += len;
-
- /* return no joy or set up to restart inflate() on a new block */
- if (state->have != 4) return Z_DATA_ERROR;
- in = strm->total_in; out = strm->total_out;
- zlib_inflateReset(strm);
- strm->total_in = in; strm->total_out = out;
- state->mode = TYPE;
- return Z_OK;
-}
-#endif
-
/*
* This subroutine adds the data at next_in/avail_in to the output history
* without performing any output. The output buffer must be "caught up";