Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig | 3
-rw-r--r--  lib/Kconfig.debug | 132
-rw-r--r--  lib/Kconfig.kasan | 74
-rw-r--r--  lib/Kconfig.kgdb | 15
-rw-r--r--  lib/Kconfig.ubsan | 137
-rw-r--r--  lib/Makefile | 15
-rw-r--r--  lib/asn1_decoder.c | 4
-rw-r--r--  lib/assoc_array.c | 2
-rw-r--r--  lib/bitfield_kunit.c (renamed from lib/test_bitfield.c) | 90
-rw-r--r--  lib/bitmap.c | 4
-rw-r--r--  lib/bootconfig.c | 4
-rw-r--r--  lib/buildid.c | 149
-rw-r--r--  lib/cmdline.c | 48
-rw-r--r--  lib/cmdline_kunit.c | 156
-rw-r--r--  lib/cpumask.c | 34
-rw-r--r--  lib/crc32.c | 2
-rw-r--r--  lib/crc32test.c | 4
-rw-r--r--  lib/crc7.c | 2
-rw-r--r--  lib/crypto/blake2s-selftest.c | 2
-rw-r--r--  lib/crypto/blake2s.c | 48
-rw-r--r--  lib/crypto/chacha20poly1305.c | 5
-rw-r--r--  lib/crypto/curve25519.c | 2
-rw-r--r--  lib/crypto/sha256.c | 214
-rw-r--r--  lib/decompress_bunzip2.c | 2
-rw-r--r--  lib/decompress_unzstd.c | 7
-rw-r--r--  lib/devmem_is_allowed.c | 27
-rw-r--r--  lib/digsig.c | 2
-rw-r--r--  lib/dim/net_dim.c | 2
-rw-r--r--  lib/dim/rdma_dim.c | 4
-rw-r--r--  lib/dump_stack.c | 2
-rw-r--r--  lib/dynamic_debug.c | 9
-rw-r--r--  lib/dynamic_queue_limits.c | 4
-rw-r--r--  lib/earlycpio.c | 2
-rw-r--r--  lib/errname.c | 1
-rw-r--r--  lib/error-inject.c | 2
-rw-r--r--  lib/errseq.c | 1
-rw-r--r--  lib/fault-inject-usercopy.c | 39
-rw-r--r--  lib/find_bit.c | 4
-rw-r--r--  lib/fonts/font_10x18.c | 3
-rw-r--r--  lib/fonts/font_6x10.c | 3
-rw-r--r--  lib/fonts/font_6x11.c | 3
-rw-r--r--  lib/fonts/font_6x8.c | 9
-rw-r--r--  lib/fonts/font_7x14.c | 3
-rw-r--r--  lib/fonts/font_8x16.c | 3
-rw-r--r--  lib/fonts/font_8x8.c | 3
-rw-r--r--  lib/fonts/font_acorn_8x8.c | 3
-rw-r--r--  lib/fonts/font_mini_4x6.c | 3
-rw-r--r--  lib/fonts/font_pearl_8x8.c | 3
-rw-r--r--  lib/fonts/font_sun12x22.c | 3
-rw-r--r--  lib/fonts/font_sun8x16.c | 3
-rw-r--r--  lib/fonts/font_ter16x32.c | 9
-rw-r--r--  lib/genalloc.c | 25
-rw-r--r--  lib/glob.c | 2
-rw-r--r--  lib/hexdump.c | 1
-rw-r--r--  lib/idr.c | 10
-rw-r--r--  lib/iov_iter.c | 52
-rw-r--r--  lib/kstrtox.c | 1
-rw-r--r--  lib/kunit/Kconfig | 1
-rw-r--r--  lib/kunit/Makefile | 3
-rw-r--r--  lib/kunit/assert.c | 39
-rw-r--r--  lib/kunit/executor.c | 118
-rw-r--r--  lib/kunit/test.c | 72
-rw-r--r--  lib/libcrc32c.c | 2
-rw-r--r--  lib/linear_ranges.c | 8
-rw-r--r--  lib/locking-selftest.c | 432
-rw-r--r--  lib/lz4/lz4_decompress.c | 6
-rw-r--r--  lib/lz4/lz4defs.h | 1
-rw-r--r--  lib/lzo/lzo1x_compress.c | 2
-rw-r--r--  lib/math/div64.c | 4
-rw-r--r--  lib/math/int_pow.c | 2
-rw-r--r--  lib/math/int_sqrt.c | 3
-rw-r--r--  lib/math/rational.c | 2
-rw-r--r--  lib/math/reciprocal_div.c | 10
-rw-r--r--  lib/mpi/ec.c | 3
-rw-r--r--  lib/mpi/mpi-bit.c | 2
-rw-r--r--  lib/nlattr.c | 166
-rw-r--r--  lib/nmi_backtrace.c | 6
-rw-r--r--  lib/parman.c | 1
-rw-r--r--  lib/percpu-refcount.c | 12
-rw-r--r--  lib/percpu_counter.c | 2
-rw-r--r--  lib/radix-tree.c | 3
-rw-r--r--  lib/raid6/Makefile | 2
-rw-r--r--  lib/random32.c | 525
-rw-r--r--  lib/sbitmap.c | 44
-rw-r--r--  lib/scatterlist.c | 139
-rw-r--r--  lib/sha1.c | 2
-rw-r--r--  lib/siphash.c | 36
-rw-r--r--  lib/smp_processor_id.c | 5
-rw-r--r--  lib/stackdepot.c | 11
-rw-r--r--  lib/string.c | 4
-rw-r--r--  lib/strncpy_from_user.c | 22
-rw-r--r--  lib/syscall.c | 13
-rw-r--r--  lib/test_bitmap.c | 94
-rw-r--r--  lib/test_bpf.c | 21
-rw-r--r--  lib/test_firmware.c | 9
-rw-r--r--  lib/test_fpu.c | 6
-rw-r--r--  lib/test_free_pages.c | 5
-rw-r--r--  lib/test_hmm.c | 2
-rw-r--r--  lib/test_kasan.c | 151
-rw-r--r--  lib/test_kasan_module.c | 31
-rw-r--r--  lib/test_kmod.c | 26
-rw-r--r--  lib/test_lockup.c | 16
-rw-r--r--  lib/test_printf.c | 16
-rw-r--r--  lib/test_sysctl.c | 2
-rw-r--r--  lib/test_ubsan.c | 74
-rw-r--r--  lib/test_xarray.c | 97
-rw-r--r--  lib/timerqueue.c | 28
-rw-r--r--  lib/ts_fsm.c | 2
-rw-r--r--  lib/ubsan.c | 33
-rw-r--r--  lib/ubsan.h | 6
-rw-r--r--  lib/usercopy.c | 5
-rw-r--r--  lib/vsprintf.c | 51
-rw-r--r--  lib/xarray.c | 233
-rw-r--r--  lib/xz/xz_dec_lzma2.c | 4
-rw-r--r--  lib/xz/xz_dec_stream.c | 16
-rw-r--r--  lib/zlib_dfltcc/Makefile | 2
-rw-r--r--  lib/zlib_dfltcc/dfltcc.c | 6
-rw-r--r--  lib/zlib_dfltcc/dfltcc_deflate.c | 3
-rw-r--r--  lib/zlib_dfltcc/dfltcc_inflate.c | 7
-rw-r--r--  lib/zlib_dfltcc/dfltcc_syms.c | 17
-rw-r--r--  lib/zlib_inflate/inflate.c | 24
-rw-r--r--  lib/zstd/bitstream.h | 11
-rw-r--r--  lib/zstd/compress.c | 2
-rw-r--r--  lib/zstd/decompress.c | 12
-rw-r--r--  lib/zstd/huf_compress.c | 5
125 files changed, 3030 insertions, 1086 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index b46a9fd122c8..46806332a8cc 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -686,6 +686,9 @@ config GENERIC_LIB_CMPDI2
config GENERIC_LIB_UCMPDI2
bool
+config GENERIC_LIB_DEVMEM_IS_ALLOWED
+ bool
+
config PLDMFW
bool
default n
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 491789a793ae..5ea0c1773b0a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -212,9 +212,10 @@ config DEBUG_INFO
If unsure, say N.
+if DEBUG_INFO
+
config DEBUG_INFO_REDUCED
bool "Reduce debugging information"
- depends on DEBUG_INFO
help
If you say Y here gcc is instructed to generate less debugging
information for structure types. This means that tools that
@@ -227,7 +228,6 @@ config DEBUG_INFO_REDUCED
config DEBUG_INFO_COMPRESSED
bool "Compressed debugging information"
- depends on DEBUG_INFO
depends on $(cc-option,-gz=zlib)
depends on $(ld-option,--compress-debug-sections=zlib)
help
@@ -243,7 +243,6 @@ config DEBUG_INFO_COMPRESSED
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
- depends on DEBUG_INFO
depends on $(cc-option,-gsplit-dwarf)
help
Generate debug info into separate .dwo files. This significantly
@@ -259,7 +258,6 @@ config DEBUG_INFO_SPLIT
config DEBUG_INFO_DWARF4
bool "Generate dwarf4 debuginfo"
- depends on DEBUG_INFO
depends on $(cc-option,-gdwarf-4)
help
Generate dwarf4 debug info. This requires recent versions
@@ -269,7 +267,6 @@ config DEBUG_INFO_DWARF4
config DEBUG_INFO_BTF
bool "Generate BTF typeinfo"
- depends on DEBUG_INFO
depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
help
@@ -277,9 +274,17 @@ config DEBUG_INFO_BTF
Turning this on expects presence of pahole tool, which will convert
DWARF type info into equivalent deduplicated BTF type info.
+config PAHOLE_HAS_SPLIT_BTF
+ def_bool $(success, test `$(PAHOLE) --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/'` -ge "119")
+
+config DEBUG_INFO_BTF_MODULES
+ def_bool y
+ depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
+ help
+ Generate compact split BTF type information for kernel modules.
+
config GDB_SCRIPTS
bool "Provide GDB scripts for kernel debugging"
- depends on DEBUG_INFO
help
This creates the required links to GDB helper scripts in the
build directory. If you load vmlinux into gdb, the helper
@@ -288,13 +293,7 @@ config GDB_SCRIPTS
instance. See Documentation/dev-tools/gdb-kernel-debugging.rst
for further details.
-config ENABLE_MUST_CHECK
- bool "Enable __must_check logic"
- default y
- help
- Enable the __must_check logic in the kernel build. Disable this to
- suppress the "warning: ignoring return value of 'foo', declared with
- attribute warn_unused_result" messages.
+endif # DEBUG_INFO
config FRAME_WARN
int "Warn for stack frames larger than"
@@ -851,9 +850,31 @@ config DEBUG_PER_CPU_MAPS
Say N if unsure.
+config DEBUG_KMAP_LOCAL
+ bool "Debug kmap_local temporary mappings"
+ depends on DEBUG_KERNEL && KMAP_LOCAL
+ help
+ This option enables additional error checking for the kmap_local
+ infrastructure. Disable for production use.
+
+config ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP
+ bool
+
+config DEBUG_KMAP_LOCAL_FORCE_MAP
+ bool "Enforce kmap_local temporary mappings"
+ depends on DEBUG_KERNEL && ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP
+ select KMAP_LOCAL
+ select DEBUG_KMAP_LOCAL
+ help
+ This option enforces temporary mappings through the kmap_local
+ mechanism for non-highmem pages and on non-highmem systems.
+ Disable this for production systems!
+
config DEBUG_HIGHMEM
bool "Highmem debugging"
depends on DEBUG_KERNEL && HIGHMEM
+ select DEBUG_KMAP_LOCAL_FORCE_MAP if ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP
+ select DEBUG_KMAP_LOCAL
help
This option enables additional error checking for high memory
systems. Disable for production systems.
@@ -1314,6 +1335,7 @@ config LOCKDEP_SMALL
config DEBUG_LOCKDEP
bool "Lock dependency engine debugging"
depends on DEBUG_KERNEL && LOCKDEP
+ select DEBUG_IRQFLAGS
help
If you say Y here, the lock dependency engine will do
additional runtime checks to debug itself, at the price
@@ -1367,6 +1389,27 @@ config WW_MUTEX_SELFTEST
Say M if you want these self tests to build as a module.
Say N if you are unsure.
+config SCF_TORTURE_TEST
+ tristate "torture tests for smp_call_function*()"
+ depends on DEBUG_KERNEL
+ select TORTURE_TEST
+ help
+ This option provides a kernel module that runs torture tests
+ on the smp_call_function() family of primitives. The kernel
+ module may be built after the fact on the running kernel to
+ be tested, if desired.
+
+config CSD_LOCK_WAIT_DEBUG
+ bool "Debugging for csd_lock_wait(), called from smp_call_function*()"
+ depends on DEBUG_KERNEL
+ depends on 64BIT
+ default n
+ help
+ This option enables debug prints when CPUs are slow to respond
+ to the smp_call_function*() IPI wrappers. These debug prints
+ include the IPI handler function currently executing (if any)
+ and relevant stack traces.
+
endmenu # lock debugging
config TRACE_IRQFLAGS
@@ -1381,6 +1424,13 @@ config TRACE_IRQFLAGS_NMI
depends on TRACE_IRQFLAGS
depends on TRACE_IRQFLAGS_NMI_SUPPORT
+config DEBUG_IRQFLAGS
+ bool "Debug IRQ flag manipulation"
+ help
+ Enables checks for potentially unsafe enabling or disabling of
+ interrupts, such as calling raw_local_irq_restore() when interrupts
+ are enabled.
+
config STACKTRACE
bool "Stack backtrace support"
depends on STACKTRACE_SUPPORT
@@ -1626,7 +1676,7 @@ config ARCH_HAS_DEVMEM_IS_ALLOWED
config STRICT_DEVMEM
bool "Filter access to /dev/mem"
depends on MMU && DEVMEM
- depends on ARCH_HAS_DEVMEM_IS_ALLOWED
+ depends on ARCH_HAS_DEVMEM_IS_ALLOWED || GENERIC_LIB_DEVMEM_IS_ALLOWED
default y if PPC || X86 || ARM64
help
If this option is disabled, you allow userspace (root) access to all
@@ -1768,6 +1818,13 @@ config FAIL_PAGE_ALLOC
help
Provide fault-injection capability for alloc_pages().
+config FAULT_INJECTION_USERCOPY
+ bool "Fault injection capability for usercopy functions"
+ depends on FAULT_INJECTION
+ help
+ Provides fault-injection capability to inject failures
+ in usercopy functions (copy_from_user(), get_user(), ...).
+
config FAIL_MAKE_REQUEST
bool "Fault-injection capability for disk IO"
depends on FAULT_INJECTION && BLOCK
@@ -2035,13 +2092,6 @@ config TEST_BITMAP
If unsure, say N.
-config TEST_BITFIELD
- tristate "Test bitfield functions at runtime"
- help
- Enable this option to test the bitfield functions at boot.
-
- If unsure, say N.
-
config TEST_UUID
tristate "Test functions located in the uuid module at runtime"
@@ -2191,6 +2241,33 @@ config TEST_SYSCTL
If unsure, say N.
+config BITFIELD_KUNIT
+ tristate "KUnit test bitfield functions at runtime"
+ depends on KUNIT
+ help
+ Enable this option to test the bitfield functions at boot.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config RESOURCE_KUNIT_TEST
+ tristate "KUnit test for resource API"
+ depends on KUNIT
+ help
+ This builds the resource API unit test.
+ Tests the logic of API provided by resource.c and ioport.h.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config SYSCTL_KUNIT_TEST
tristate "KUnit test for sysctl" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2234,6 +2311,17 @@ config LINEAR_RANGES_TEST
If unsure, say N.
+config CMDLINE_KUNIT_TEST
+ tristate "KUnit test for cmdline API"
+ depends on KUNIT
+ help
+ This builds the cmdline API unit test.
+ Tests the logic of API provided by cmdline.c.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config BITS_TEST
tristate "KUnit test for bits.h"
depends on KUNIT
@@ -2411,4 +2499,6 @@ config HYPERV_TESTING
endmenu # "Kernel Testing and Coverage"
+source "Documentation/Kconfig"
+
endmenu # Kernel hacking
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 542a9c18398e..f5fa4ba126bf 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -6,7 +6,10 @@ config HAVE_ARCH_KASAN
config HAVE_ARCH_KASAN_SW_TAGS
bool
-config HAVE_ARCH_KASAN_VMALLOC
+config HAVE_ARCH_KASAN_HW_TAGS
+ bool
+
+config HAVE_ARCH_KASAN_VMALLOC
bool
config CC_HAS_KASAN_GENERIC
@@ -15,15 +18,20 @@ config CC_HAS_KASAN_GENERIC
config CC_HAS_KASAN_SW_TAGS
def_bool $(cc-option, -fsanitize=kernel-hwaddress)
+# This option is only required for software KASAN modes.
+# Old GCC versions don't have proper support for no_sanitize_address.
+# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89124 for details.
config CC_HAS_WORKING_NOSANITIZE_ADDRESS
def_bool !CC_IS_GCC || GCC_VERSION >= 80300
menuconfig KASAN
bool "KASAN: runtime memory debugger"
- depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
- (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)
+ depends on (((HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
+ (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \
+ CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
+ HAVE_ARCH_KASAN_HW_TAGS
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
- depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
+ select STACKDEPOT
help
Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
designed to find out-of-bounds accesses and use-after-free bugs.
@@ -35,21 +43,24 @@ choice
prompt "KASAN mode"
default KASAN_GENERIC
help
- KASAN has two modes: generic KASAN (similar to userspace ASan,
- x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC) and
- software tag-based KASAN (a version based on software memory
- tagging, arm64 only, similar to userspace HWASan, enabled with
- CONFIG_KASAN_SW_TAGS).
+ KASAN has three modes:
+ 1. generic KASAN (similar to userspace ASan,
+ x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC),
+ 2. software tag-based KASAN (arm64 only, based on software
+ memory tagging (similar to userspace HWASan), enabled with
+ CONFIG_KASAN_SW_TAGS), and
+ 3. hardware tag-based KASAN (arm64 only, based on hardware
+ memory tagging, enabled with CONFIG_KASAN_HW_TAGS).
- Both generic and tag-based KASAN are strictly debugging features.
+ All KASAN modes are strictly debugging features.
+
+ For better error reports enable CONFIG_STACKTRACE.
config KASAN_GENERIC
bool "Generic mode"
depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
- depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
- select STACKDEPOT
help
Enables generic KASAN mode.
@@ -62,23 +73,22 @@ config KASAN_GENERIC
and introduces an overhead of ~x1.5 for the rest of the allocations.
The performance slowdown is ~x3.
- For better error detection enable CONFIG_STACKTRACE.
-
Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot).
config KASAN_SW_TAGS
bool "Software tag-based mode"
depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
- depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
- select STACKDEPOT
help
Enables software tag-based KASAN mode.
- This mode requires Top Byte Ignore support by the CPU and therefore
- is only supported for arm64. This mode requires Clang.
+ This mode requires software memory tagging support in the form of
+ HWASan-like compiler instrumentation.
+
+ Currently this mode is only implemented for arm64 CPUs and relies on
+ Top Byte Ignore. This mode requires Clang.
This mode consumes about 1/16th of available memory at kernel start
and introduces an overhead of ~20% for the rest of the allocations.
@@ -86,15 +96,27 @@ config KASAN_SW_TAGS
casting and comparison, as it embeds tags into the top byte of each
pointer.
- For better error detection enable CONFIG_STACKTRACE.
-
Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot).
+config KASAN_HW_TAGS
+ bool "Hardware tag-based mode"
+ depends on HAVE_ARCH_KASAN_HW_TAGS
+ depends on SLUB
+ help
+ Enables hardware tag-based KASAN mode.
+
+ This mode requires hardware memory tagging support, and can be used
+ by any architecture that provides it.
+
+ Currently this mode is only implemented for arm64 CPUs starting from
+ ARMv8.5 and relies on Memory Tagging Extension and Top Byte Ignore.
+
endchoice
choice
prompt "Instrumentation type"
+ depends on KASAN_GENERIC || KASAN_SW_TAGS
default KASAN_OUTLINE
config KASAN_OUTLINE
@@ -118,6 +140,7 @@ endchoice
config KASAN_STACK_ENABLE
bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
+ depends on KASAN_GENERIC || KASAN_SW_TAGS
help
The LLVM stack address sanitizer has a know problem that
causes excessive stack usage in a lot of functions, see
@@ -136,15 +159,6 @@ config KASAN_STACK
default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
default 0
-config KASAN_S390_4_LEVEL_PAGING
- bool "KASan: use 4-level paging"
- depends on S390
- help
- Compiling the kernel with KASan disables automatic 3-level vs
- 4-level paging selection. 3-level paging is used by default (up
- to 3TB of RAM with KASan enabled). This options allows to force
- 4-level paging instead.
-
config KASAN_SW_TAGS_IDENTIFY
bool "Enable memory corruption identification"
depends on KASAN_SW_TAGS
@@ -155,7 +169,7 @@ config KASAN_SW_TAGS_IDENTIFY
config KASAN_VMALLOC
bool "Back mappings in vmalloc space with real shadow memory"
- depends on HAVE_ARCH_KASAN_VMALLOC
+ depends on KASAN_GENERIC && HAVE_ARCH_KASAN_VMALLOC
help
By default, the shadow region for vmalloc space is the read-only
zero page. This means that KASAN cannot detect errors involving
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 256f2486f9bd..05dae05b6cc9 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -24,6 +24,21 @@ menuconfig KGDB
if KGDB
+config KGDB_HONOUR_BLOCKLIST
+ bool "KGDB: use kprobe blocklist to prohibit unsafe breakpoints"
+ depends on HAVE_KPROBES
+ depends on MODULES
+ select KPROBES
+ default y
+ help
+ If set to Y the debug core will use the kprobe blocklist to
+ identify symbols where it is unsafe to set breakpoints.
+ In particular this disallows instrumentation of functions
+ called during debug trap handling and thus makes it very
+ difficult to inadvertently provoke recursive trap handling.
+
+ If unsure, say Y.
+
config KGDB_SERIAL_CONSOLE
tristate "KGDB: use kgdb over the serial console"
select CONSOLE_POLL
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 774315de555a..3a0b1c930733 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -14,6 +14,7 @@ if UBSAN
config UBSAN_TRAP
bool "On Sanitizer warnings, abort the running kernel code"
+ depends on !COMPILE_TEST
depends on $(cc-option, -fsanitize-undefined-trap-on-error)
help
Building kernels with Sanitizer features enabled tends to grow
@@ -36,10 +37,17 @@ config UBSAN_KCOV_BROKEN
See https://bugs.llvm.org/show_bug.cgi?id=45831 for the status
in newer releases.
+config CC_HAS_UBSAN_BOUNDS
+ def_bool $(cc-option,-fsanitize=bounds)
+
+config CC_HAS_UBSAN_ARRAY_BOUNDS
+ def_bool $(cc-option,-fsanitize=array-bounds)
+
config UBSAN_BOUNDS
bool "Perform array index bounds checking"
default UBSAN
depends on !UBSAN_KCOV_BROKEN
+ depends on CC_HAS_UBSAN_ARRAY_BOUNDS || CC_HAS_UBSAN_BOUNDS
help
This option enables detection of directly indexed out of bounds
array accesses, where the array size is known at compile time.
@@ -47,22 +55,122 @@ config UBSAN_BOUNDS
to the {str,mem}*cpy() family of functions (that is addressed
by CONFIG_FORTIFY_SOURCE).
-config UBSAN_MISC
- bool "Enable all other Undefined Behavior sanity checks"
+config UBSAN_ONLY_BOUNDS
+ def_bool CC_HAS_UBSAN_BOUNDS && !CC_HAS_UBSAN_ARRAY_BOUNDS
+ depends on UBSAN_BOUNDS
+ help
+ This is a weird case: Clang's -fsanitize=bounds includes
+ -fsanitize=local-bounds, but it's trapping-only, so for
+ Clang, we must use -fsanitize=array-bounds when we want
+ traditional array bounds checking enabled. For GCC, we
+ want -fsanitize=bounds.
+
+config UBSAN_ARRAY_BOUNDS
+ def_bool CC_HAS_UBSAN_ARRAY_BOUNDS
+ depends on UBSAN_BOUNDS
+
+config UBSAN_LOCAL_BOUNDS
+ bool "Perform array local bounds checking"
+ depends on UBSAN_TRAP
+ depends on !UBSAN_KCOV_BROKEN
+ depends on $(cc-option,-fsanitize=local-bounds)
+ help
+ This option enables -fsanitize=local-bounds which traps when an
+ exception/error is detected. Therefore, it may only be enabled
+ with CONFIG_UBSAN_TRAP.
+
+ Enabling this option detects errors due to accesses through a
+ pointer that is derived from an object of a statically-known size,
+ where an added offset (which may not be known statically) is
+ out-of-bounds.
+
+config UBSAN_SHIFT
+ bool "Perform checking for bit-shift overflows"
+ default UBSAN
+ depends on $(cc-option,-fsanitize=shift)
+ help
+ This option enables -fsanitize=shift which checks for bit-shift
+ operations that overflow to the left or become negative
+ for signed types.
+
+config UBSAN_DIV_ZERO
+ bool "Perform checking for integer divide-by-zero"
+ depends on $(cc-option,-fsanitize=integer-divide-by-zero)
+ help
+ This option enables -fsanitize=integer-divide-by-zero which checks
+ for integer division by zero. This is effectively redundant with the
+ kernel's existing exception handling, though it can provide greater
+ debugging information under CONFIG_UBSAN_REPORT_FULL.
+
+config UBSAN_UNREACHABLE
+ bool "Perform checking for unreachable code"
+ # objtool already handles unreachable checking and gets angry about
+ # seeing UBSan instrumentation located in unreachable places.
+ depends on !STACK_VALIDATION
+ depends on $(cc-option,-fsanitize=unreachable)
+ help
+ This option enables -fsanitize=unreachable which checks for control
+ flow reaching an expected-to-be-unreachable position.
+
+config UBSAN_SIGNED_OVERFLOW
+ bool "Perform checking for signed arithmetic overflow"
+ default UBSAN
+ depends on $(cc-option,-fsanitize=signed-integer-overflow)
+ help
+ This option enables -fsanitize=signed-integer-overflow which checks
+ for overflow of any arithmetic operations with signed integers.
+
+config UBSAN_UNSIGNED_OVERFLOW
+ bool "Perform checking for unsigned arithmetic overflow"
+ depends on $(cc-option,-fsanitize=unsigned-integer-overflow)
+ depends on !X86_32 # avoid excessive stack usage on x86-32/clang
+ help
+ This option enables -fsanitize=unsigned-integer-overflow which checks
+ for overflow of any arithmetic operations with unsigned integers. This
+ currently causes x86 to fail to boot.
+
+config UBSAN_OBJECT_SIZE
+ bool "Perform checking for accesses beyond the end of objects"
default UBSAN
+ # gcc hugely expands stack usage with -fsanitize=object-size
+ # https://lore.kernel.org/lkml/CAHk-=wjPasyJrDuwDnpHJS2TuQfExwe=px-SzLeN8GFMAQJPmQ@mail.gmail.com/
+ depends on !CC_IS_GCC
+ depends on $(cc-option,-fsanitize=object-size)
help
- This option enables all sanity checks that don't have their
- own Kconfig options. Disable this if you only want to have
- individually selected checks.
+ This option enables -fsanitize=object-size which checks for accesses
+ beyond the end of objects where the optimizer can determine both the
+ object being operated on and its size, usually seen with bad downcasts,
+ or access to struct members from NULL pointers.
+
+config UBSAN_BOOL
+ bool "Perform checking for non-boolean values used as boolean"
+ default UBSAN
+ depends on $(cc-option,-fsanitize=bool)
+ help
+ This option enables -fsanitize=bool which checks for boolean values being
+ loaded that are neither 0 nor 1.
+
+config UBSAN_ENUM
+ bool "Perform checking for out of bounds enum values"
+ default UBSAN
+ depends on $(cc-option,-fsanitize=enum)
+ help
+ This option enables -fsanitize=enum which checks for values being loaded
+ into an enum that are outside the range of given values for the given enum.
+
+config UBSAN_ALIGNMENT
+ bool "Perform checking for misaligned pointer usage"
+ default !HAVE_EFFICIENT_UNALIGNED_ACCESS
+ depends on !UBSAN_TRAP && !COMPILE_TEST
+ depends on $(cc-option,-fsanitize=alignment)
+ help
+ This option enables the check of unaligned memory accesses.
+ Enabling this option on architectures that support unaligned
+ accesses may produce a lot of false positives.
config UBSAN_SANITIZE_ALL
bool "Enable instrumentation for the entire kernel"
depends on ARCH_HAS_UBSAN_SANITIZE_ALL
-
- # We build with -Wno-maybe-uninitilzed, but we still want to
- # use -Wmaybe-uninitilized in allmodconfig builds.
- # So dependsy bellow used to disable this option in allmodconfig
- depends on !COMPILE_TEST
default y
help
This option activates instrumentation for the entire kernel.
@@ -71,15 +179,6 @@ config UBSAN_SANITIZE_ALL
Enabling this option will get kernel image size increased
significantly.
-config UBSAN_ALIGNMENT
- bool "Enable checks for pointers alignment"
- default !HAVE_EFFICIENT_UNALIGNED_ACCESS
- depends on !UBSAN_TRAP
- help
- This option enables the check of unaligned memory accesses.
- Enabling this option on architectures that support unaligned
- accesses may produce a lot of false positives.
-
config TEST_UBSAN
tristate "Module for testing for undefined behavior detection"
depends on m
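
As a concrete illustration of what the new bounds options instrument (a minimal sketch, not part of this patch): with -fsanitize=bounds on GCC, or -fsanitize=array-bounds on Clang, a directly indexed access to a compile-time-sized array is checked at runtime, and an out-of-range index produces a UBSAN report (or a trap under CONFIG_UBSAN_TRAP):

	/* Hypothetical example, not from this patch: CONFIG_UBSAN_BOUNDS
	 * instruments this directly indexed, fixed-size array access.
	 */
	static int table[8];

	int read_entry(int i)
	{
		return table[i];	/* i == 10 at runtime -> UBSAN bounds report */
	}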
diff --git a/lib/Makefile b/lib/Makefile
index 49a2a9e36224..fb7d946bb8c3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -27,16 +27,14 @@ KASAN_SANITIZE_string.o := n
CFLAGS_string.o += -fno-stack-protector
endif
-# Used by KCSAN while enabled, avoid recursion.
-KCSAN_SANITIZE_random32.o := n
-
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
idr.o extable.o sha1.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
- nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o
+ nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o \
+ buildid.o
lib-$(CONFIG_PRINTK) += dump_stack.o
lib-$(CONFIG_SMP) += cpumask.o
@@ -87,7 +85,6 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
obj-$(CONFIG_TEST_STRSCPY) += test_strscpy.o
-obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o
obj-$(CONFIG_TEST_UUID) += test_uuid.o
obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
@@ -108,7 +105,7 @@ obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
# off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS
# get appended last to CFLAGS and thus override those previous compiler options.
#
-FPU_CFLAGS := -mhard-float -msse -msse2
+FPU_CFLAGS := -msse -msse2
ifdef CONFIG_CC_IS_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
@@ -121,6 +118,7 @@ ifdef CONFIG_CC_IS_GCC
# -mpreferred-stack-boundary=3 is not between 4 and 12
#
# can be triggered. Otherwise gcc doesn't complain.
+FPU_CFLAGS += -mhard-float
FPU_CFLAGS += $(call cc-option,-msse -mpreferred-stack-boundary=3,-mpreferred-stack-boundary=4)
endif
@@ -210,6 +208,7 @@ obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
+obj-$(CONFIG_FAULT_INJECTION_USERCOPY) += fault-inject-usercopy.o
obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
@@ -348,6 +347,10 @@ obj-$(CONFIG_OBJAGG) += objagg.o
obj-$(CONFIG_PLDMFW) += pldmfw/
# KUnit tests
+obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
obj-$(CONFIG_BITS_TEST) += test_bits.o
+obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
+
+obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 58f72b25f8e9..13da529e2e72 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -381,7 +381,7 @@ next_op:
case ASN1_OP_END_SET_ACT:
if (unlikely(!(flags & FLAG_MATCHED)))
goto tag_mismatch;
- /* fall through */
+ fallthrough;
case ASN1_OP_END_SEQ:
case ASN1_OP_END_SET_OF:
@@ -448,7 +448,7 @@ next_op:
pc += asn1_op_lengths[op];
goto next_op;
}
- /* fall through */
+ fallthrough;
case ASN1_OP_ACT:
ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len);
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 6f4bcf524554..04c98799c3ba 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -1113,7 +1113,7 @@ struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
index_key))
goto found_leaf;
}
- /* fall through */
+ fallthrough;
case assoc_array_walk_tree_empty:
case assoc_array_walk_found_wrong_shortcut:
default:
diff --git a/lib/test_bitfield.c b/lib/bitfield_kunit.c
index 5b8f4108662d..1473d8b4bf0f 100644
--- a/lib/test_bitfield.c
+++ b/lib/bitfield_kunit.c
@@ -5,8 +5,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/kernel.h>
-#include <linux/module.h>
+#include <kunit/test.h>
#include <linux/bitfield.h>
#define CHECK_ENC_GET_U(tp, v, field, res) do { \
@@ -14,13 +13,11 @@
u##tp _res; \
\
_res = u##tp##_encode_bits(v, field); \
- if (_res != res) { \
- pr_warn("u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n",\
- (u64)_res); \
- return -EINVAL; \
- } \
- if (u##tp##_get_bits(_res, field) != v) \
- return -EINVAL; \
+ KUNIT_ASSERT_FALSE_MSG(context, _res != res, \
+ "u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n", \
+ (u64)_res); \
+ KUNIT_ASSERT_FALSE(context, \
+ u##tp##_get_bits(_res, field) != v); \
} \
} while (0)
@@ -29,14 +26,13 @@
__le##tp _res; \
\
_res = le##tp##_encode_bits(v, field); \
- if (_res != cpu_to_le##tp(res)) { \
- pr_warn("le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\
- (u64)le##tp##_to_cpu(_res), \
- (u64)(res)); \
- return -EINVAL; \
- } \
- if (le##tp##_get_bits(_res, field) != v) \
- return -EINVAL; \
+ KUNIT_ASSERT_FALSE_MSG(context, \
+ _res != cpu_to_le##tp(res), \
+ "le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx",\
+ (u64)le##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ KUNIT_ASSERT_FALSE(context, \
+ le##tp##_get_bits(_res, field) != v);\
} \
} while (0)
@@ -45,14 +41,13 @@
__be##tp _res; \
\
_res = be##tp##_encode_bits(v, field); \
- if (_res != cpu_to_be##tp(res)) { \
- pr_warn("be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx\n",\
- (u64)be##tp##_to_cpu(_res), \
- (u64)(res)); \
- return -EINVAL; \
- } \
- if (be##tp##_get_bits(_res, field) != v) \
- return -EINVAL; \
+ KUNIT_ASSERT_FALSE_MSG(context, \
+ _res != cpu_to_be##tp(res), \
+ "be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx", \
+ (u64)be##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ KUNIT_ASSERT_FALSE(context, \
+ be##tp##_get_bits(_res, field) != v);\
} \
} while (0)
@@ -62,7 +57,7 @@
CHECK_ENC_GET_BE(tp, v, field, res); \
} while (0)
-static int test_constants(void)
+static void __init test_bitfields_constants(struct kunit *context)
{
/*
* NOTE
@@ -95,19 +90,17 @@ static int test_constants(void)
CHECK_ENC_GET(64, 7, 0x00f0000000000000ull, 0x0070000000000000ull);
CHECK_ENC_GET(64, 14, 0x0f00000000000000ull, 0x0e00000000000000ull);
CHECK_ENC_GET(64, 15, 0xf000000000000000ull, 0xf000000000000000ull);
-
- return 0;
}
#define CHECK(tp, mask) do { \
u64 v; \
\
for (v = 0; v < 1 << hweight32(mask); v++) \
- if (tp##_encode_bits(v, mask) != v << __ffs64(mask)) \
- return -EINVAL; \
+ KUNIT_ASSERT_FALSE(context, \
+ tp##_encode_bits(v, mask) != v << __ffs64(mask));\
} while (0)
-static int test_variables(void)
+static void __init test_bitfields_variables(struct kunit *context)
{
CHECK(u8, 0x0f);
CHECK(u8, 0xf0);
@@ -130,39 +123,32 @@ static int test_variables(void)
CHECK(u64, 0x000000007f000000ull);
CHECK(u64, 0x0000000018000000ull);
CHECK(u64, 0x0000001f8000000ull);
-
- return 0;
}
-static int __init test_bitfields(void)
-{
- int ret = test_constants();
-
- if (ret) {
- pr_warn("constant tests failed!\n");
- return ret;
- }
-
- ret = test_variables();
- if (ret) {
- pr_warn("variable tests failed!\n");
- return ret;
- }
-
#ifdef TEST_BITFIELD_COMPILE
+static void __init test_bitfields_compile(struct kunit *context)
+{
/* these should fail compilation */
CHECK_ENC_GET(16, 16, 0x0f00, 0x1000);
u32_encode_bits(7, 0x06000000);
/* this should at least give a warning */
u16_encode_bits(0, 0x60000);
+}
#endif
- pr_info("tests passed\n");
+static struct kunit_case __refdata bitfields_test_cases[] = {
+ KUNIT_CASE(test_bitfields_constants),
+ KUNIT_CASE(test_bitfields_variables),
+ {}
+};
- return 0;
-}
-module_init(test_bitfields)
+static struct kunit_suite bitfields_test_suite = {
+ .name = "bitfields",
+ .test_cases = bitfields_test_cases,
+};
+
+kunit_test_suites(&bitfields_test_suite);
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_LICENSE("GPL");
diff --git a/lib/bitmap.c b/lib/bitmap.c
index c13d859bc7ab..75006c4036e9 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -23,7 +23,7 @@
/**
* DOC: bitmap introduction
*
- * bitmaps provide an array of bits, implemented using an an
+ * bitmaps provide an array of bits, implemented using an
* array of unsigned longs. The number of valid bits in a
* given bitmap does _not_ need to be an exact multiple of
* BITS_PER_LONG.
@@ -552,7 +552,7 @@ static inline bool end_of_region(char c)
}
/*
- * The format allows commas and whitespases at the beginning
+ * The format allows commas and whitespaces at the beginning
* of the region.
*/
static const char *bitmap_find_region(const char *str)
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 649ed44f199c..9f8c70a98fcf 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -827,7 +827,7 @@ int __init xbc_init(char *buf, const char **emsg, int *epos)
q - 2);
break;
}
- /* fall through */
+ fallthrough;
case '=':
ret = xbc_parse_kv(&p, q, c);
break;
@@ -836,7 +836,7 @@ int __init xbc_init(char *buf, const char **emsg, int *epos)
break;
case '#':
q = skip_comment(q);
- /* fall through */
+ fallthrough;
case ';':
case '\n':
ret = xbc_parse_key(&p, q);
diff --git a/lib/buildid.c b/lib/buildid.c
new file mode 100644
index 000000000000..6156997c3895
--- /dev/null
+++ b/lib/buildid.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/buildid.h>
+#include <linux/elf.h>
+#include <linux/pagemap.h>
+
+#define BUILD_ID 3
+/*
+ * Parse build id from the note segment. This logic can be shared between
+ * 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
+ * identical.
+ */
+static inline int parse_build_id(void *page_addr,
+ unsigned char *build_id,
+ __u32 *size,
+ void *note_start,
+ Elf32_Word note_size)
+{
+ Elf32_Word note_offs = 0, new_offs;
+
+ /* check for overflow */
+ if (note_start < page_addr || note_start + note_size < note_start)
+ return -EINVAL;
+
+ /* only supports note that fits in the first page */
+ if (note_start + note_size > page_addr + PAGE_SIZE)
+ return -EINVAL;
+
+ while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
+ Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
+
+ if (nhdr->n_type == BUILD_ID &&
+ nhdr->n_namesz == sizeof("GNU") &&
+ nhdr->n_descsz > 0 &&
+ nhdr->n_descsz <= BUILD_ID_SIZE_MAX) {
+ memcpy(build_id,
+ note_start + note_offs +
+ ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
+ nhdr->n_descsz);
+ memset(build_id + nhdr->n_descsz, 0,
+ BUILD_ID_SIZE_MAX - nhdr->n_descsz);
+ if (size)
+ *size = nhdr->n_descsz;
+ return 0;
+ }
+ new_offs = note_offs + sizeof(Elf32_Nhdr) +
+ ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
+ if (new_offs <= note_offs) /* overflow */
+ break;
+ note_offs = new_offs;
+ }
+ return -EINVAL;
+}
+
+/* Parse build ID from 32-bit ELF */
+static int get_build_id_32(void *page_addr, unsigned char *build_id,
+ __u32 *size)
+{
+ Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
+ Elf32_Phdr *phdr;
+ int i;
+
+ /* only supports phdr that fits in one page */
+ if (ehdr->e_phnum >
+ (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
+ return -EINVAL;
+
+ phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
+
+ for (i = 0; i < ehdr->e_phnum; ++i) {
+ if (phdr[i].p_type == PT_NOTE &&
+ !parse_build_id(page_addr, build_id, size,
+ page_addr + phdr[i].p_offset,
+ phdr[i].p_filesz))
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/* Parse build ID from 64-bit ELF */
+static int get_build_id_64(void *page_addr, unsigned char *build_id,
+ __u32 *size)
+{
+ Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
+ Elf64_Phdr *phdr;
+ int i;
+
+ /* only supports phdr that fits in one page */
+ if (ehdr->e_phnum >
+ (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
+ return -EINVAL;
+
+ phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
+
+ for (i = 0; i < ehdr->e_phnum; ++i) {
+ if (phdr[i].p_type == PT_NOTE &&
+ !parse_build_id(page_addr, build_id, size,
+ page_addr + phdr[i].p_offset,
+ phdr[i].p_filesz))
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Parse build ID of ELF file mapped to vma
+ * @vma: vma object
+ * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
+ * @size: returns actual build id size in case of success
+ *
+ * Returns 0 on success, otherwise error (< 0).
+ */
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
+ __u32 *size)
+{
+ Elf32_Ehdr *ehdr;
+ struct page *page;
+ void *page_addr;
+ int ret;
+
+ /* only works for page backed storage */
+ if (!vma->vm_file)
+ return -EINVAL;
+
+ page = find_get_page(vma->vm_file->f_mapping, 0);
+ if (!page)
+ return -EFAULT; /* page not mapped */
+
+ ret = -EINVAL;
+ page_addr = kmap_atomic(page);
+ ehdr = (Elf32_Ehdr *)page_addr;
+
+ /* compare magic x7f "ELF" */
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
+ goto out;
+
+ /* only support executable file and shared object file */
+ if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
+ goto out;
+
+ if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
+ ret = get_build_id_32(page_addr, build_id, size);
+ else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
+ ret = get_build_id_64(page_addr, build_id, size);
+out:
+ kunmap_atomic(page_addr);
+ put_page(page);
+ return ret;
+}
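
A minimal caller sketch for the new helper (hypothetical, not part of this patch): build_id_parse() takes a file-backed VMA and fills a buffer of at least BUILD_ID_SIZE_MAX bytes (the constant the parser above zero-pads to), returning 0 on success with the actual ID length in @size:

	#include <linux/buildid.h>
	#include <linux/mm.h>
	#include <linux/printk.h>

	/* Hypothetical caller: assumes the VMA is kept valid by the caller,
	 * e.g. under mmap_lock.
	 */
	static void report_build_id(struct vm_area_struct *vma)
	{
		unsigned char id[BUILD_ID_SIZE_MAX];
		__u32 id_size;

		if (build_id_parse(vma, id, &id_size) == 0)
			pr_info("build id: %*phN\n", (int)id_size, id);
	}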
diff --git a/lib/cmdline.c b/lib/cmdline.c
index fbb9981a04a4..dfd4c4423f9a 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -35,25 +35,37 @@ static int get_range(char **str, int *pint, int n)
/**
* get_option - Parse integer from an option string
* @str: option string
- * @pint: (output) integer value parsed from @str
+ * @pint: (optional output) integer value parsed from @str
*
* Read an int from an option string; if available accept a subsequent
* comma as well.
*
+ * When @pint is NULL the function can be used as a validator of
+ * the current option in the string.
+ *
* Return values:
* 0 - no int in string
* 1 - int found, no subsequent comma
* 2 - int found including a subsequent comma
* 3 - hyphen found to denote a range
+ *
+ * A leading hyphen not followed by an integer is treated as the no-integer
+ * case, but the hyphen is consumed for the sake of simplicity.
*/
int get_option(char **str, int *pint)
{
char *cur = *str;
+ int value;
if (!cur || !(*cur))
return 0;
- *pint = simple_strtol(cur, str, 0);
+ if (*cur == '-')
+ value = -simple_strtoull(++cur, str, 0);
+ else
+ value = simple_strtoull(cur, str, 0);
+ if (pint)
+ *pint = value;
if (cur == *str)
return 0;
if (**str == ',') {
@@ -71,7 +83,7 @@ EXPORT_SYMBOL(get_option);
* get_options - Parse a string into a list of integers
* @str: String to be parsed
* @nints: size of integer array
- * @ints: integer array
+ * @ints: integer array (must have room for at least one element)
*
* This function parses a string containing a comma-separated
* list of integers, a hyphen-separated range of _positive_ integers,
@@ -79,6 +91,14 @@ EXPORT_SYMBOL(get_option);
* full, or when no more numbers can be retrieved from the
* string.
*
+ * When @nints is 0, the function just validates the given @str and
+ * returns the number of parseable integers as described below.
+ *
+ * Returns:
+ *
+ * The first element of @ints is filled with the number of collected
+ * integers in the range. The rest of @ints holds the parsed values.
+ *
* Return value is the character in the string which caused
* the parse to end (typically a null terminator, if @str is
* completely parseable).
@@ -86,15 +106,20 @@ EXPORT_SYMBOL(get_option);
char *get_options(const char *str, int nints, int *ints)
{
+ bool validate = (nints == 0);
int res, i = 1;
- while (i < nints) {
- res = get_option((char **)&str, ints + i);
+ while (i < nints || validate) {
+ int *pint = validate ? ints : ints + i;
+
+ res = get_option((char **)&str, pint);
if (res == 0)
break;
if (res == 3) {
+ int n = validate ? 0 : nints - i;
int range_nums;
- range_nums = get_range((char **)&str, ints + i, nints - i);
+
+ range_nums = get_range((char **)&str, pint, n);
if (range_nums < 0)
break;
/*
@@ -132,27 +157,28 @@ unsigned long long memparse(const char *ptr, char **retptr)
case 'E':
case 'e':
ret <<= 10;
- /* fall through */
+ fallthrough;
case 'P':
case 'p':
ret <<= 10;
- /* fall through */
+ fallthrough;
case 'T':
case 't':
ret <<= 10;
- /* fall through */
+ fallthrough;
case 'G':
case 'g':
ret <<= 10;
- /* fall through */
+ fallthrough;
case 'M':
case 'm':
ret <<= 10;
- /* fall through */
+ fallthrough;
case 'K':
case 'k':
ret <<= 10;
endptr++;
+ fallthrough;
default:
break;
}
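
To make the documented calling conventions concrete, here is a usage sketch (hypothetical caller, not part of this patch). Passing a NULL @pint to get_option() only checks that an integer is present, and calling get_options() with @nints == 0 validates the whole string, storing only the would-be element count in ints[0], while a sized array collects the values themselves:

	#include <linux/kernel.h>

	static void parse_list(const char *arg)		/* e.g. arg = "1-3,5" */
	{
		char *s = (char *)arg;
		int count_only[1];	/* room for the element count only */
		int vals[8];

		if (!get_option(&s, NULL))		/* NULL @pint: validate only */
			return;

		get_options(arg, 0, count_only);	/* count_only[0] == 4 for "1-3,5" */
		get_options(arg, ARRAY_SIZE(vals), vals); /* vals = { 4, 1, 2, 3, 5, ... } */
	}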
diff --git a/lib/cmdline_kunit.c b/lib/cmdline_kunit.c
new file mode 100644
index 000000000000..018bfc8113c4
--- /dev/null
+++ b/lib/cmdline_kunit.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for API provided by cmdline.c
+ */
+
+#include <kunit/test.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/string.h>
+
+static const char *cmdline_test_strings[] = {
+ "\"\"", "" , "=" , "\"-", "," , "-," , ",-" , "-" ,
+ "+," , "--", ",,", "''" , "\"\",", "\",\"", "-\"\"", "\"",
+};
+
+static const int cmdline_test_values[] = {
+ 1, 1, 1, 1, 2, 3, 2, 3,
+ 1, 3, 2, 1, 1, 1, 3, 1,
+};
+
+static_assert(ARRAY_SIZE(cmdline_test_strings) == ARRAY_SIZE(cmdline_test_values));
+
+static const char *cmdline_test_range_strings[] = {
+ "-7" , "--7" , "-1-2" , "7--9",
+ "7-" , "-7--9", "7-9," , "9-7" ,
+ "5-a", "a-5" , "5-8" , ",8-5",
+ "+,1", "-,4" , "-3,0-1,6", "4,-" ,
+ " +2", " -9" , "0-1,-3,6", "- 9" ,
+};
+
+static const int cmdline_test_range_values[][16] = {
+ { 1, -7, }, { 0, -0, }, { 4, -1, 0, +1, 2, }, { 0, 7, },
+ { 0, +7, }, { 0, -7, }, { 3, +7, 8, +9, 0, }, { 0, 9, },
+ { 0, +5, }, { 0, -0, }, { 4, +5, 6, +7, 8, }, { 0, 0, },
+ { 0, +0, }, { 0, -0, }, { 4, -3, 0, +1, 6, }, { 1, 4, },
+ { 0, +0, }, { 0, -0, }, { 4, +0, 1, -3, 6, }, { 0, 0, },
+};
+
+static_assert(ARRAY_SIZE(cmdline_test_range_strings) == ARRAY_SIZE(cmdline_test_range_values));
+
+static void cmdline_do_one_test(struct kunit *test, const char *in, int rc, int offset)
+{
+ const char *fmt = "Pattern: %s";
+ const char *out = in;
+ int dummy;
+ int ret;
+
+ ret = get_option((char **)&out, &dummy);
+
+ KUNIT_EXPECT_EQ_MSG(test, ret, rc, fmt, in);
+ KUNIT_EXPECT_PTR_EQ_MSG(test, out, in + offset, fmt, in);
+}
+
+static void cmdline_test_noint(struct kunit *test)
+{
+ unsigned int i = 0;
+
+ do {
+ const char *str = cmdline_test_strings[i];
+ int rc = 0;
+ int offset;
+
+ /* Only first and leading '-' will advance the pointer */
+ offset = !!(*str == '-');
+ cmdline_do_one_test(test, str, rc, offset);
+ } while (++i < ARRAY_SIZE(cmdline_test_strings));
+}
+
+static void cmdline_test_lead_int(struct kunit *test)
+{
+ unsigned int i = 0;
+ char in[32];
+
+ do {
+ const char *str = cmdline_test_strings[i];
+ int rc = cmdline_test_values[i];
+ int offset;
+
+ sprintf(in, "%u%s", get_random_int() % 256, str);
+ /* Only first '-' after the number will advance the pointer */
+ offset = strlen(in) - strlen(str) + !!(rc == 2);
+ cmdline_do_one_test(test, in, rc, offset);
+ } while (++i < ARRAY_SIZE(cmdline_test_strings));
+}
+
+static void cmdline_test_tail_int(struct kunit *test)
+{
+ unsigned int i = 0;
+ char in[32];
+
+ do {
+ const char *str = cmdline_test_strings[i];
+ /* When "" or "-" the result will be valid integer */
+ int rc = strcmp(str, "") ? (strcmp(str, "-") ? 0 : 1) : 1;
+ int offset;
+
+ sprintf(in, "%s%u", str, get_random_int() % 256);
+ /*
+ * Only first and leading '-' not followed by integer
+ * will advance the pointer.
+ */
+ offset = rc ? strlen(in) : !!(*str == '-');
+ cmdline_do_one_test(test, in, rc, offset);
+ } while (++i < ARRAY_SIZE(cmdline_test_strings));
+}
+
+static void cmdline_do_one_range_test(struct kunit *test, const char *in,
+ unsigned int n, const int *e)
+{
+ unsigned int i;
+ int r[16];
+ int *p;
+
+ memset(r, 0, sizeof(r));
+ get_options(in, ARRAY_SIZE(r), r);
+ KUNIT_EXPECT_EQ_MSG(test, r[0], e[0], "in test %u (parsed) expected %d numbers, got %d",
+ n, e[0], r[0]);
+ for (i = 1; i < ARRAY_SIZE(r); i++)
+ KUNIT_EXPECT_EQ_MSG(test, r[i], e[i], "in test %u at %u", n, i);
+
+ memset(r, 0, sizeof(r));
+ get_options(in, 0, r);
+ KUNIT_EXPECT_EQ_MSG(test, r[0], e[0], "in test %u (validated) expected %d numbers, got %d",
+ n, e[0], r[0]);
+
+ p = memchr_inv(&r[1], 0, sizeof(r) - sizeof(r[0]));
+ KUNIT_EXPECT_PTR_EQ_MSG(test, p, (int *)0, "in test %u at %u out of bound", n, p - r);
+}
+
+static void cmdline_test_range(struct kunit *test)
+{
+ unsigned int i = 0;
+
+ do {
+ const char *str = cmdline_test_range_strings[i];
+ const int *e = cmdline_test_range_values[i];
+
+ cmdline_do_one_range_test(test, str, i, e);
+ } while (++i < ARRAY_SIZE(cmdline_test_range_strings));
+}
+
+static struct kunit_case cmdline_test_cases[] = {
+ KUNIT_CASE(cmdline_test_noint),
+ KUNIT_CASE(cmdline_test_lead_int),
+ KUNIT_CASE(cmdline_test_tail_int),
+ KUNIT_CASE(cmdline_test_range),
+ {}
+};
+
+static struct kunit_suite cmdline_test_suite = {
+ .name = "cmdline",
+ .test_cases = cmdline_test_cases,
+};
+kunit_test_suite(cmdline_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 85da6ab4fbb5..c3c76b833384 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -6,7 +6,6 @@
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>
-#include <linux/sched/isolation.h>
/**
* cpumask_next - get the next cpu in a cpumask
@@ -206,27 +205,22 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
*/
unsigned int cpumask_local_spread(unsigned int i, int node)
{
- int cpu, hk_flags;
- const struct cpumask *mask;
+ int cpu;
- hk_flags = HK_FLAG_DOMAIN | HK_FLAG_MANAGED_IRQ;
- mask = housekeeping_cpumask(hk_flags);
/* Wrap: we always want a cpu. */
- i %= cpumask_weight(mask);
+ i %= num_online_cpus();
if (node == NUMA_NO_NODE) {
- for_each_cpu(cpu, mask) {
+ for_each_cpu(cpu, cpu_online_mask)
if (i-- == 0)
return cpu;
- }
} else {
/* NUMA first. */
- for_each_cpu_and(cpu, cpumask_of_node(node), mask) {
+ for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
if (i-- == 0)
return cpu;
- }
- for_each_cpu(cpu, mask) {
+ for_each_cpu(cpu, cpu_online_mask) {
/* Skip NUMA nodes, done above. */
if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
continue;
@@ -267,3 +261,21 @@ int cpumask_any_and_distribute(const struct cpumask *src1p,
return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
+
+int cpumask_any_distribute(const struct cpumask *srcp)
+{
+ int next, prev;
+
+ /* NOTE: our first selection will skip 0. */
+ prev = __this_cpu_read(distribute_cpu_mask_prev);
+
+ next = cpumask_next(prev, srcp);
+ if (next >= nr_cpu_ids)
+ next = cpumask_first(srcp);
+
+ if (next < nr_cpu_ids)
+ __this_cpu_write(distribute_cpu_mask_prev, next);
+
+ return next;
+}
+EXPORT_SYMBOL(cpumask_any_distribute);
diff --git a/lib/crc32.c b/lib/crc32.c
index 35a03d03f973..2a68dfd3b96c 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -331,7 +331,7 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
return crc;
}
-#if CRC_LE_BITS == 1
+#if CRC_BE_BITS == 1
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
diff --git a/lib/crc32test.c b/lib/crc32test.c
index 97d6a57cefcc..61ddce2cff77 100644
--- a/lib/crc32test.c
+++ b/lib/crc32test.c
@@ -683,7 +683,6 @@ static int __init crc32c_test(void)
/* reduce OS noise */
local_irq_save(flags);
- local_irq_disable();
nsec = ktime_get_ns();
for (i = 0; i < 100; i++) {
@@ -694,7 +693,6 @@ static int __init crc32c_test(void)
nsec = ktime_get_ns() - nsec;
local_irq_restore(flags);
- local_irq_enable();
pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
@@ -768,7 +766,6 @@ static int __init crc32_test(void)
/* reduce OS noise */
local_irq_save(flags);
- local_irq_disable();
nsec = ktime_get_ns();
for (i = 0; i < 100; i++) {
@@ -783,7 +780,6 @@ static int __init crc32_test(void)
nsec = ktime_get_ns() - nsec;
local_irq_restore(flags);
- local_irq_enable();
pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
CRC_LE_BITS, CRC_BE_BITS);
diff --git a/lib/crc7.c b/lib/crc7.c
index 6a848d73e804..3848e313b722 100644
--- a/lib/crc7.c
+++ b/lib/crc7.c
@@ -51,7 +51,7 @@ const u8 crc7_be_syndrome_table[256] = {
EXPORT_SYMBOL(crc7_be_syndrome_table);
/**
- * crc7 - update the CRC7 for the data buffer
+ * crc7_be - update the CRC7 for the data buffer
* @crc: previous CRC7 value
* @buffer: data pointer
* @len: number of bytes in the buffer
diff --git a/lib/crypto/blake2s-selftest.c b/lib/crypto/blake2s-selftest.c
index 79ef404a990d..5d9ea53be973 100644
--- a/lib/crypto/blake2s-selftest.c
+++ b/lib/crypto/blake2s-selftest.c
@@ -3,7 +3,7 @@
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/
-#include <crypto/blake2s.h>
+#include <crypto/internal/blake2s.h>
#include <linux/string.h>
/*
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
index 41025a30c524..c64ac8bfb6a9 100644
--- a/lib/crypto/blake2s.c
+++ b/lib/crypto/blake2s.c
@@ -15,57 +15,23 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bug.h>
-#include <asm/unaligned.h>
-bool blake2s_selftest(void);
+#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S)
+# define blake2s_compress blake2s_compress_arch
+#else
+# define blake2s_compress blake2s_compress_generic
+#endif
void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
{
- const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
-
- if (unlikely(!inlen))
- return;
- if (inlen > fill) {
- memcpy(state->buf + state->buflen, in, fill);
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S))
- blake2s_compress_arch(state, state->buf, 1,
- BLAKE2S_BLOCK_SIZE);
- else
- blake2s_compress_generic(state, state->buf, 1,
- BLAKE2S_BLOCK_SIZE);
- state->buflen = 0;
- in += fill;
- inlen -= fill;
- }
- if (inlen > BLAKE2S_BLOCK_SIZE) {
- const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
- /* Hash one less (full) block than strictly possible */
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S))
- blake2s_compress_arch(state, in, nblocks - 1,
- BLAKE2S_BLOCK_SIZE);
- else
- blake2s_compress_generic(state, in, nblocks - 1,
- BLAKE2S_BLOCK_SIZE);
- in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
- inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
- }
- memcpy(state->buf + state->buflen, in, inlen);
- state->buflen += inlen;
+ __blake2s_update(state, in, inlen, blake2s_compress);
}
EXPORT_SYMBOL(blake2s_update);
void blake2s_final(struct blake2s_state *state, u8 *out)
{
WARN_ON(IS_ENABLED(DEBUG) && !out);
- blake2s_set_lastblock(state);
- memset(state->buf + state->buflen, 0,
- BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S))
- blake2s_compress_arch(state, state->buf, 1, state->buflen);
- else
- blake2s_compress_generic(state, state->buf, 1, state->buflen);
- cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
- memcpy(out, state->h, state->outlen);
+ __blake2s_final(state, out, blake2s_compress);
memzero_explicit(state, sizeof(*state));
}
EXPORT_SYMBOL(blake2s_final);
diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c
index 5850f3b87359..c2fcdb98cc02 100644
--- a/lib/crypto/chacha20poly1305.c
+++ b/lib/crypto/chacha20poly1305.c
@@ -362,7 +362,12 @@ static int __init mod_init(void)
return 0;
}
+static void __exit mod_exit(void)
+{
+}
+
module_init(mod_init);
+module_exit(mod_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChaCha20Poly1305 AEAD construction");
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/lib/crypto/curve25519.c b/lib/crypto/curve25519.c
index 288a62cd29b2..fb29739e8c29 100644
--- a/lib/crypto/curve25519.c
+++ b/lib/crypto/curve25519.c
@@ -13,8 +13,6 @@
#include <linux/module.h>
#include <linux/init.h>
-bool curve25519_selftest(void);
-
static int __init mod_init(void)
{
if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index 2321f6cb322f..72a4b0b1df28 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -15,9 +15,28 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/string.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <asm/unaligned.h>
+static const u32 SHA256_K[] = {
+ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
+ 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+ 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+ 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
+ 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+ 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
+ 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+ 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+ 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
+ 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+ 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
+ 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+ 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
+};
+
static inline u32 Ch(u32 x, u32 y, u32 z)
{
return z ^ (x & (y ^ z));
@@ -43,173 +62,68 @@ static inline void BLEND_OP(int I, u32 *W)
W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
}
-static void sha256_transform(u32 *state, const u8 *input)
+#define SHA256_ROUND(i, a, b, c, d, e, f, g, h) do { \
+ u32 t1, t2; \
+ t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i]; \
+ t2 = e0(a) + Maj(a, b, c); \
+ d += t1; \
+ h = t1 + t2; \
+} while (0)
+
+static void sha256_transform(u32 *state, const u8 *input, u32 *W)
{
- u32 a, b, c, d, e, f, g, h, t1, t2;
- u32 W[64];
+ u32 a, b, c, d, e, f, g, h;
int i;
/* load the input */
- for (i = 0; i < 16; i++)
- LOAD_OP(i, W, input);
+ for (i = 0; i < 16; i += 8) {
+ LOAD_OP(i + 0, W, input);
+ LOAD_OP(i + 1, W, input);
+ LOAD_OP(i + 2, W, input);
+ LOAD_OP(i + 3, W, input);
+ LOAD_OP(i + 4, W, input);
+ LOAD_OP(i + 5, W, input);
+ LOAD_OP(i + 6, W, input);
+ LOAD_OP(i + 7, W, input);
+ }
/* now blend */
- for (i = 16; i < 64; i++)
- BLEND_OP(i, W);
+ for (i = 16; i < 64; i += 8) {
+ BLEND_OP(i + 0, W);
+ BLEND_OP(i + 1, W);
+ BLEND_OP(i + 2, W);
+ BLEND_OP(i + 3, W);
+ BLEND_OP(i + 4, W);
+ BLEND_OP(i + 5, W);
+ BLEND_OP(i + 6, W);
+ BLEND_OP(i + 7, W);
+ }
/* load the state into our registers */
a = state[0]; b = state[1]; c = state[2]; d = state[3];
e = state[4]; f = state[5]; g = state[6]; h = state[7];
/* now iterate */
- t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0];
- t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
- t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1];
- t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
- t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2];
- t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
- t1 = e + e1(b) + Ch(b, c, d) + 0xe9b5dba5 + W[3];
- t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
- t1 = d + e1(a) + Ch(a, b, c) + 0x3956c25b + W[4];
- t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
- t1 = c + e1(h) + Ch(h, a, b) + 0x59f111f1 + W[5];
- t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
- t1 = b + e1(g) + Ch(g, h, a) + 0x923f82a4 + W[6];
- t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
- t1 = a + e1(f) + Ch(f, g, h) + 0xab1c5ed5 + W[7];
- t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
-
- t1 = h + e1(e) + Ch(e, f, g) + 0xd807aa98 + W[8];
- t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
- t1 = g + e1(d) + Ch(d, e, f) + 0x12835b01 + W[9];
- t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
- t1 = f + e1(c) + Ch(c, d, e) + 0x243185be + W[10];
- t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
- t1 = e + e1(b) + Ch(b, c, d) + 0x550c7dc3 + W[11];
- t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
- t1 = d + e1(a) + Ch(a, b, c) + 0x72be5d74 + W[12];
- t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
- t1 = c + e1(h) + Ch(h, a, b) + 0x80deb1fe + W[13];
- t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
- t1 = b + e1(g) + Ch(g, h, a) + 0x9bdc06a7 + W[14];
- t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
- t1 = a + e1(f) + Ch(f, g, h) + 0xc19bf174 + W[15];
- t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
-
- t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16];
- t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
- t1 = g + e1(d) + Ch(d, e, f) + 0xefbe4786 + W[17];
- t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
- t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18];
- t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
- t1 = e + e1(b) + Ch(b, c, d) + 0x240ca1cc + W[19];
- t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
- t1 = d + e1(a) + Ch(a, b, c) + 0x2de92c6f + W[20];
- t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
- t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21];
- t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
- t1 = b + e1(g) + Ch(g, h, a) + 0x5cb0a9dc + W[22];
- t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
- t1 = a + e1(f) + Ch(f, g, h) + 0x76f988da + W[23];
- t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
-
- t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24];
- t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
- t1 = g + e1(d) + Ch(d, e, f) + 0xa831c66d + W[25];
- t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
- t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26];
- t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
- t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27];
- t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
- t1 = d + e1(a) + Ch(a, b, c) + 0xc6e00bf3 + W[28];
- t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
- t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29];
- t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
- t1 = b + e1(g) + Ch(g, h, a) + 0x06ca6351 + W[30];
- t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
- t1 = a + e1(f) + Ch(f, g, h) + 0x14292967 + W[31];
- t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
-
- t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32];
- t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
- t1 = g + e1(d) + Ch(d, e, f) + 0x2e1b2138 + W[33];
- t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
- t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34];
- t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
- t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35];
- t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
- t1 = d + e1(a) + Ch(a, b, c) + 0x650a7354 + W[36];
- t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
- t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37];
- t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
- t1 = b + e1(g) + Ch(g, h, a) + 0x81c2c92e + W[38];
- t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
- t1 = a + e1(f) + Ch(f, g, h) + 0x92722c85 + W[39];
- t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
-
- t1 = h + e1(e) + Ch(e, f, g) + 0xa2bfe8a1 + W[40];
- t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
- t1 = g + e1(d) + Ch(d, e, f) + 0xa81a664b + W[41];
- t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
- t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42];
- t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
- t1 = e + e1(b) + Ch(b, c, d) + 0xc76c51a3 + W[43];
- t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
- t1 = d + e1(a) + Ch(a, b, c) + 0xd192e819 + W[44];
- t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
- t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45];
- t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
- t1 = b + e1(g) + Ch(g, h, a) + 0xf40e3585 + W[46];
- t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
- t1 = a + e1(f) + Ch(f, g, h) + 0x106aa070 + W[47];
- t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
-
- t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48];
- t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
- t1 = g + e1(d) + Ch(d, e, f) + 0x1e376c08 + W[49];
- t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
- t1 = f + e1(c) + Ch(c, d, e) + 0x2748774c + W[50];
- t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
- t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51];
- t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
- t1 = d + e1(a) + Ch(a, b, c) + 0x391c0cb3 + W[52];
- t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
- t1 = c + e1(h) + Ch(h, a, b) + 0x4ed8aa4a + W[53];
- t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
- t1 = b + e1(g) + Ch(g, h, a) + 0x5b9cca4f + W[54];
- t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
- t1 = a + e1(f) + Ch(f, g, h) + 0x682e6ff3 + W[55];
- t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
-
- t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56];
- t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
- t1 = g + e1(d) + Ch(d, e, f) + 0x78a5636f + W[57];
- t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
- t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58];
- t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
- t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59];
- t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
- t1 = d + e1(a) + Ch(a, b, c) + 0x90befffa + W[60];
- t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
- t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61];
- t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
- t1 = b + e1(g) + Ch(g, h, a) + 0xbef9a3f7 + W[62];
- t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
- t1 = a + e1(f) + Ch(f, g, h) + 0xc67178f2 + W[63];
- t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
+ for (i = 0; i < 64; i += 8) {
+ SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h);
+ SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g);
+ SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f);
+ SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e);
+ SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d);
+ SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c);
+ SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b);
+ SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a);
+ }
state[0] += a; state[1] += b; state[2] += c; state[3] += d;
state[4] += e; state[5] += f; state[6] += g; state[7] += h;
-
- /* clear any sensitive info... */
- a = b = c = d = e = f = g = h = t1 = t2 = 0;
- memzero_explicit(W, 64 * sizeof(u32));
}
void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
{
unsigned int partial, done;
const u8 *src;
+ u32 W[64];
partial = sctx->count & 0x3f;
sctx->count += len;
@@ -224,11 +138,13 @@ void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
}
do {
- sha256_transform(sctx->state, src);
+ sha256_transform(sctx->state, src, W);
done += 64;
src = data + done;
} while (done + 63 < len);
+ memzero_explicit(W, sizeof(W));
+
partial = 0;
}
memcpy(sctx->buf + partial, src, len - done);
@@ -265,7 +181,7 @@ static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words)
put_unaligned_be32(sctx->state[i], &dst[i]);
/* Zeroize sensitive information. */
- memset(sctx, 0, sizeof(*sctx));
+ memzero_explicit(sctx, sizeof(*sctx));
}
void sha256_final(struct sha256_state *sctx, u8 *out)
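The re-rolled loop above works because SHA256_ROUND is invoked with the eight working variables in a different order on each of the eight unrolled calls, so the macro's fixed body rotates a..h without any explicit copies. As a minimal stand-alone sketch of that argument-rotation idea (a toy mixing round, not the SHA-256 math itself):

#include <stdio.h>

/*
 * Toy "round": mixes the last argument into the fourth and replaces the
 * last, mimicking how SHA256_ROUND updates only d and h. The real hash
 * math is deliberately replaced by a trivial mix; this only demonstrates
 * that permuting the argument order rotates the working variables.
 */
#define TOY_ROUND(i, a, b, c, d, e, f, g, h) do {       \
        unsigned int t = (h) ^ (unsigned int)(i);       \
        (d) += t;                                       \
        (h) = t + (a);                                  \
} while (0)

int main(void)
{
        unsigned int a = 1, b = 2, c = 3, d = 4, e = 5, f = 6, g = 7, h = 8;
        int i;

        /* Same call pattern as the re-rolled transform: the argument list
         * is rotated by one position per round, so after eight rounds every
         * variable has played every role exactly once. */
        for (i = 0; i < 8; i += 8) {
                TOY_ROUND(i + 0, a, b, c, d, e, f, g, h);
                TOY_ROUND(i + 1, h, a, b, c, d, e, f, g);
                TOY_ROUND(i + 2, g, h, a, b, c, d, e, f);
                TOY_ROUND(i + 3, f, g, h, a, b, c, d, e);
                TOY_ROUND(i + 4, e, f, g, h, a, b, c, d);
                TOY_ROUND(i + 5, d, e, f, g, h, a, b, c);
                TOY_ROUND(i + 6, c, d, e, f, g, h, a, b);
                TOY_ROUND(i + 7, b, c, d, e, f, g, h, a);
        }

        printf("%u %u %u %u %u %u %u %u\n", a, b, c, d, e, f, g, h);
        return 0;
}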
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index f9628f3924ce..c72c865032fa 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -390,7 +390,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
j = (bd->inbufBits >> bd->inbufBitCount)&
((1 << hufGroup->maxLen)-1);
got_huff_bits:
- /* Figure how how many bits are in next symbol and
+ /* Figure how many bits are in next symbol and
* unget extras */
i = hufGroup->minLen;
while (j > limit[i])
diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c
index 0ad2c15479ed..790abc472f5b 100644
--- a/lib/decompress_unzstd.c
+++ b/lib/decompress_unzstd.c
@@ -178,8 +178,13 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
int err;
size_t ret;
+ /*
+ * ZSTD decompression code won't be happy if the buffer size is so big
+ * that its end address overflows. When the size is not provided, make
+ * it as big as possible without having the end address overflow.
+ */
if (out_len == 0)
- out_len = LONG_MAX; /* no limit */
+ out_len = UINTPTR_MAX - (uintptr_t)out_buf;
if (fill == NULL && flush == NULL)
/*
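The replacement default is chosen so that out_buf + out_len cannot wrap around the top of the address space. A small stand-alone sketch of the same clamping computation (hypothetical buffer, not the decompressor itself):

#include <stdint.h>
#include <stdio.h>

/* Largest length such that (uintptr_t)buf + len cannot overflow. */
static uintptr_t max_len_without_wrap(const void *buf)
{
        return UINTPTR_MAX - (uintptr_t)buf;
}

int main(void)
{
        static unsigned char out[64];   /* stand-in for the output buffer */
        uintptr_t limit = max_len_without_wrap(out);

        /* (uintptr_t)out + limit == UINTPTR_MAX, so computing the buffer's
         * end address can never wrap back to low memory. */
        printf("buffer at %p, safe length limit %llu\n",
               (void *)out, (unsigned long long)limit);
        return 0;
}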
diff --git a/lib/devmem_is_allowed.c b/lib/devmem_is_allowed.c
new file mode 100644
index 000000000000..c0d67c541849
--- /dev/null
+++ b/lib/devmem_is_allowed.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * A generic version of devmem_is_allowed.
+ *
+ * Based on arch/arm64/mm/mmap.c
+ *
+ * Copyright (C) 2020 Google, Inc.
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/mm.h>
+#include <linux/ioport.h>
+
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain address
+ * is valid. The argument is a physical page number. We mimic x86 here by
+ * disallowing access to system RAM as well as device-exclusive MMIO regions.
+ * This effectively disables read()/write() on /dev/mem.
+ */
+int devmem_is_allowed(unsigned long pfn)
+{
+ if (iomem_is_exclusive(pfn << PAGE_SHIFT))
+ return 0;
+ if (!page_is_ram(pfn))
+ return 1;
+ return 0;
+}
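A user-space model of the policy above, with hypothetical stub predicates standing in for iomem_is_exclusive() and page_is_ram(), makes the return values concrete (1 allows the pfn, 0 rejects it):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Hypothetical stand-ins for the kernel predicates used above. */
static bool stub_iomem_is_exclusive(unsigned long addr) { return addr == 0x100000; }
static bool stub_page_is_ram(unsigned long pfn)         { return pfn < 0x80; }

/* Same decision structure as the generic devmem_is_allowed(). */
static int model_devmem_is_allowed(unsigned long pfn)
{
        if (stub_iomem_is_exclusive(pfn << PAGE_SHIFT))
                return 0;       /* a driver claimed the region exclusively */
        if (!stub_page_is_ram(pfn))
                return 1;       /* non-RAM (MMIO) is allowed */
        return 0;               /* system RAM is not */
}

int main(void)
{
        printf("RAM pfn 0x10   -> %d\n", model_devmem_is_allowed(0x10));
        printf("MMIO pfn 0x200 -> %d\n", model_devmem_is_allowed(0x200));
        printf("excl pfn 0x100 -> %d\n", model_devmem_is_allowed(0x100));
        return 0;
}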
diff --git a/lib/digsig.c b/lib/digsig.c
index e0627c3e53b2..04b5e55ed95f 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -20,7 +20,7 @@
#include <linux/key.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <keys/user-type.h>
#include <linux/mpi.h>
#include <linux/digsig.h>
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
index a4db51c21266..06811d866775 100644
--- a/lib/dim/net_dim.c
+++ b/lib/dim/net_dim.c
@@ -233,7 +233,7 @@ void net_dim(struct dim *dim, struct dim_sample end_sample)
schedule_work(&dim->work);
break;
}
- /* fall through */
+ fallthrough;
case DIM_START_MEASURE:
dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr,
end_sample.byte_ctr, &dim->start_sample);
diff --git a/lib/dim/rdma_dim.c b/lib/dim/rdma_dim.c
index f7e26c7b4749..15462d54758d 100644
--- a/lib/dim/rdma_dim.c
+++ b/lib/dim/rdma_dim.c
@@ -59,7 +59,7 @@ static bool rdma_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
break;
case DIM_STATS_WORSE:
dim_turn(dim);
- /* fall through */
+ fallthrough;
case DIM_STATS_BETTER:
step_res = rdma_dim_step(dim);
if (step_res == DIM_ON_EDGE)
@@ -94,7 +94,7 @@ void rdma_dim(struct dim *dim, u64 completions)
schedule_work(&dim->work);
break;
}
- /* fall through */
+ fallthrough;
case DIM_START_MEASURE:
dim->state = DIM_MEASURE_IN_PROGRESS;
dim_update_sample_with_comps(curr_sample->event_ctr, 0, 0,
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index a00ee6eedc7c..f5a33b6f773f 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -12,6 +12,7 @@
#include <linux/atomic.h>
#include <linux/kexec.h>
#include <linux/utsname.h>
+#include <linux/stop_machine.h>
static char dump_stack_arch_desc_str[128];
@@ -57,6 +58,7 @@ void dump_stack_print_info(const char *log_lvl)
log_lvl, dump_stack_arch_desc_str);
print_worker_info(log_lvl, current);
+ print_stop_info(log_lvl, current);
}
/**
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index bd7b3aaa93c3..c70d6347afa2 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -561,9 +561,14 @@ static int ddebug_exec_queries(char *query, const char *modname)
int dynamic_debug_exec_queries(const char *query, const char *modname)
{
int rc;
- char *qry = kstrndup(query, PAGE_SIZE, GFP_KERNEL);
+ char *qry; /* writable copy of query */
- if (!query)
+ if (!query) {
+ pr_err("non-null query/command string expected\n");
+ return -EINVAL;
+ }
+ qry = kstrndup(query, PAGE_SIZE, GFP_KERNEL);
+ if (!qry)
return -ENOMEM;
rc = ddebug_exec_queries(qry, modname);
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index e659a027036e..fde0aa244148 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -60,8 +60,8 @@ void dql_completed(struct dql *dql, unsigned int count)
* A decrease is only considered if the queue has been busy in
* the whole interval (the check above).
*
- * If there is slack, the amount of execess data queued above
- * the the amount needed to prevent starvation, the queue limit
+ * If there is slack, the amount of excess data queued above
+ * the amount needed to prevent starvation, the queue limit
* can be decreased. To avoid hysteresis we consider the
* minimum amount of slack found over several iterations of the
* completion routine.
diff --git a/lib/earlycpio.c b/lib/earlycpio.c
index c001e084829e..e83628882001 100644
--- a/lib/earlycpio.c
+++ b/lib/earlycpio.c
@@ -42,7 +42,7 @@ enum cpio_fields {
/**
* cpio_data find_cpio_data - Search for files in an uncompressed cpio
* @path: The directory to search for, including a slash at the end
- * @data: Pointer to the the cpio archive or a header inside
+ * @data: Pointer to the cpio archive or a header inside
* @len: Remaining length of the cpio based on data pointer
* @nextoff: When a matching file is found, this is the offset from the
* beginning of the cpio to the beginning of the next file, not the
diff --git a/lib/errname.c b/lib/errname.c
index 0c4d3e66170e..05cbf731545f 100644
--- a/lib/errname.c
+++ b/lib/errname.c
@@ -3,6 +3,7 @@
#include <linux/errno.h>
#include <linux/errname.h>
#include <linux/kernel.h>
+#include <linux/math.h>
/*
* Ensure these tables do not accidentally become gigantic if some
diff --git a/lib/error-inject.c b/lib/error-inject.c
index aa63751c916f..c73651b15b76 100644
--- a/lib/error-inject.c
+++ b/lib/error-inject.c
@@ -180,6 +180,8 @@ static const char *error_type_string(int etype)
return "ERRNO";
case EI_ETYPE_ERRNO_NULL:
return "ERRNO_NULL";
+ case EI_ETYPE_TRUE:
+ return "TRUE";
default:
return "(unknown)";
}
diff --git a/lib/errseq.c b/lib/errseq.c
index 81f9e33aa7e7..93e9b94358dc 100644
--- a/lib/errseq.c
+++ b/lib/errseq.c
@@ -3,6 +3,7 @@
#include <linux/bug.h>
#include <linux/atomic.h>
#include <linux/errseq.h>
+#include <linux/log2.h>
/*
* An errseq_t is a way of recording errors in one place, and allowing any
diff --git a/lib/fault-inject-usercopy.c b/lib/fault-inject-usercopy.c
new file mode 100644
index 000000000000..77558b6c29ca
--- /dev/null
+++ b/lib/fault-inject-usercopy.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/fault-inject.h>
+#include <linux/fault-inject-usercopy.h>
+
+static struct {
+ struct fault_attr attr;
+} fail_usercopy = {
+ .attr = FAULT_ATTR_INITIALIZER,
+};
+
+static int __init setup_fail_usercopy(char *str)
+{
+ return setup_fault_attr(&fail_usercopy.attr, str);
+}
+__setup("fail_usercopy=", setup_fail_usercopy);
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init fail_usercopy_debugfs(void)
+{
+ struct dentry *dir;
+
+ dir = fault_create_debugfs_attr("fail_usercopy", NULL,
+ &fail_usercopy.attr);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ return 0;
+}
+
+late_initcall(fail_usercopy_debugfs);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+bool should_fail_usercopy(void)
+{
+ return should_fail(&fail_usercopy.attr, 1);
+}
+EXPORT_SYMBOL_GPL(should_fail_usercopy);
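should_fail_usercopy() is meant to be consulted at the top of the user-copy paths (the iov_iter hunks below wire it into copyin()/copyout()). A toy user-space model of that pattern, with a made-up fault_attr substitute that fails every Nth call rather than the kernel's fault-injection machinery:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for struct fault_attr: fail one call in every 'interval'. */
struct toy_fault_attr {
        unsigned int interval;
        unsigned int calls;
};

static bool toy_should_fail(struct toy_fault_attr *attr)
{
        return attr->interval && (++attr->calls % attr->interval) == 0;
}

static struct toy_fault_attr fail_usercopy = { .interval = 4 };

/* Mirrors how copyout() consults the fault hook before copying. */
static size_t toy_copy_to_user(char *to, const char *from, size_t n)
{
        if (toy_should_fail(&fail_usercopy))
                return n;               /* pretend nothing was copied */
        memcpy(to, from, n);
        return 0;                       /* 0 bytes left uncopied */
}

int main(void)
{
        char dst[16];
        int i;

        for (i = 0; i < 8; i++)
                printf("copy %d: %zu bytes not copied\n", i,
                       toy_copy_to_user(dst, "hello", 6));
        return 0;
}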
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 49f875f1baf7..f67f86fd2f62 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -15,7 +15,9 @@
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/export.h>
-#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/swab.h>
#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \
!defined(find_next_bit_le) || !defined(find_next_zero_bit_le) || \
diff --git a/lib/fonts/font_10x18.c b/lib/fonts/font_10x18.c
index 0e2deac97da0..5d940db626e7 100644
--- a/lib/fonts/font_10x18.c
+++ b/lib/fonts/font_10x18.c
@@ -8,7 +8,7 @@
#define FONTDATAMAX 9216
-static struct font_data fontdata_10x18 = {
+static const struct font_data fontdata_10x18 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, 0x00, /* 0000000000 */
@@ -5137,6 +5137,7 @@ const struct font_desc font_10x18 = {
.name = "10x18",
.width = 10,
.height = 18,
+ .charcount = 256,
.data = fontdata_10x18.data,
#ifdef __sparc__
.pref = 5,
diff --git a/lib/fonts/font_6x10.c b/lib/fonts/font_6x10.c
index 87da8acd07db..e65df019e0d2 100644
--- a/lib/fonts/font_6x10.c
+++ b/lib/fonts/font_6x10.c
@@ -3,7 +3,7 @@
#define FONTDATAMAX 2560
-static struct font_data fontdata_6x10 = {
+static const struct font_data fontdata_6x10 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
@@ -3083,6 +3083,7 @@ const struct font_desc font_6x10 = {
.name = "6x10",
.width = 6,
.height = 10,
+ .charcount = 256,
.data = fontdata_6x10.data,
.pref = 0,
};
diff --git a/lib/fonts/font_6x11.c b/lib/fonts/font_6x11.c
index 5e975dfa10a5..bd76b3f6b635 100644
--- a/lib/fonts/font_6x11.c
+++ b/lib/fonts/font_6x11.c
@@ -9,7 +9,7 @@
#define FONTDATAMAX (11*256)
-static struct font_data fontdata_6x11 = {
+static const struct font_data fontdata_6x11 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
@@ -3346,6 +3346,7 @@ const struct font_desc font_vga_6x11 = {
.name = "ProFont6x11",
.width = 6,
.height = 11,
+ .charcount = 256,
.data = fontdata_6x11.data,
/* Try avoiding this font if possible unless on MAC */
.pref = -2000,
diff --git a/lib/fonts/font_6x8.c b/lib/fonts/font_6x8.c
index e06447788418..06ace7792521 100644
--- a/lib/fonts/font_6x8.c
+++ b/lib/fonts/font_6x8.c
@@ -3,8 +3,8 @@
#define FONTDATAMAX 2048
-static const unsigned char fontdata_6x8[FONTDATAMAX] = {
-
+static const struct font_data fontdata_6x8 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 000000 */
0x00, /* 000000 */
@@ -2564,13 +2564,14 @@ static const unsigned char fontdata_6x8[FONTDATAMAX] = {
0x00, /* 000000 */
0x00, /* 000000 */
0x00, /* 000000 */
-};
+} };
const struct font_desc font_6x8 = {
.idx = FONT6x8_IDX,
.name = "6x8",
.width = 6,
.height = 8,
- .data = fontdata_6x8,
+ .charcount = 256,
+ .data = fontdata_6x8.data,
.pref = 0,
};
diff --git a/lib/fonts/font_7x14.c b/lib/fonts/font_7x14.c
index 86d298f38505..a2f561c9fa04 100644
--- a/lib/fonts/font_7x14.c
+++ b/lib/fonts/font_7x14.c
@@ -8,7 +8,7 @@
#define FONTDATAMAX 3584
-static struct font_data fontdata_7x14 = {
+static const struct font_data fontdata_7x14 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 0000000 */
@@ -4113,6 +4113,7 @@ const struct font_desc font_7x14 = {
.name = "7x14",
.width = 7,
.height = 14,
+ .charcount = 256,
.data = fontdata_7x14.data,
.pref = 0,
};
diff --git a/lib/fonts/font_8x16.c b/lib/fonts/font_8x16.c
index 37cedd36ca5e..06ae14088514 100644
--- a/lib/fonts/font_8x16.c
+++ b/lib/fonts/font_8x16.c
@@ -10,7 +10,7 @@
#define FONTDATAMAX 4096
-static struct font_data fontdata_8x16 = {
+static const struct font_data fontdata_8x16 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
@@ -4627,6 +4627,7 @@ const struct font_desc font_vga_8x16 = {
.name = "VGA8x16",
.width = 8,
.height = 16,
+ .charcount = 256,
.data = fontdata_8x16.data,
.pref = 0,
};
diff --git a/lib/fonts/font_8x8.c b/lib/fonts/font_8x8.c
index 8ab695538395..69570b8c31af 100644
--- a/lib/fonts/font_8x8.c
+++ b/lib/fonts/font_8x8.c
@@ -9,7 +9,7 @@
#define FONTDATAMAX 2048
-static struct font_data fontdata_8x8 = {
+static const struct font_data fontdata_8x8 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
@@ -2578,6 +2578,7 @@ const struct font_desc font_vga_8x8 = {
.name = "VGA8x8",
.width = 8,
.height = 8,
+ .charcount = 256,
.data = fontdata_8x8.data,
.pref = 0,
};
diff --git a/lib/fonts/font_acorn_8x8.c b/lib/fonts/font_acorn_8x8.c
index 069b3e80c434..18755c33d249 100644
--- a/lib/fonts/font_acorn_8x8.c
+++ b/lib/fonts/font_acorn_8x8.c
@@ -5,7 +5,7 @@
#define FONTDATAMAX 2048
-static struct font_data acorndata_8x8 = {
+static const struct font_data acorndata_8x8 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */
/* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */
@@ -270,6 +270,7 @@ const struct font_desc font_acorn_8x8 = {
.name = "Acorn8x8",
.width = 8,
.height = 8,
+ .charcount = 256,
.data = acorndata_8x8.data,
#ifdef CONFIG_ARCH_ACORN
.pref = 20,
diff --git a/lib/fonts/font_mini_4x6.c b/lib/fonts/font_mini_4x6.c
index 1449876c6a27..8d39fd447952 100644
--- a/lib/fonts/font_mini_4x6.c
+++ b/lib/fonts/font_mini_4x6.c
@@ -43,7 +43,7 @@ __END__;
#define FONTDATAMAX 1536
-static struct font_data fontdata_mini_4x6 = {
+static const struct font_data fontdata_mini_4x6 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/*{*/
/* Char 0: ' ' */
@@ -2152,6 +2152,7 @@ const struct font_desc font_mini_4x6 = {
.name = "MINI4x6",
.width = 4,
.height = 6,
+ .charcount = 256,
.data = fontdata_mini_4x6.data,
.pref = 3,
};
diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c
index 32d65551e7ed..b1678ed0017c 100644
--- a/lib/fonts/font_pearl_8x8.c
+++ b/lib/fonts/font_pearl_8x8.c
@@ -14,7 +14,7 @@
#define FONTDATAMAX 2048
-static struct font_data fontdata_pearl8x8 = {
+static const struct font_data fontdata_pearl8x8 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, /* 00000000 */
@@ -2582,6 +2582,7 @@ const struct font_desc font_pearl_8x8 = {
.name = "PEARL8x8",
.width = 8,
.height = 8,
+ .charcount = 256,
.data = fontdata_pearl8x8.data,
.pref = 2,
};
diff --git a/lib/fonts/font_sun12x22.c b/lib/fonts/font_sun12x22.c
index 641a6b4dca42..91daf5ab8b6b 100644
--- a/lib/fonts/font_sun12x22.c
+++ b/lib/fonts/font_sun12x22.c
@@ -3,7 +3,7 @@
#define FONTDATAMAX 11264
-static struct font_data fontdata_sun12x22 = {
+static const struct font_data fontdata_sun12x22 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, 0x00, /* 000000000000 */
@@ -6156,6 +6156,7 @@ const struct font_desc font_sun_12x22 = {
.name = "SUN12x22",
.width = 12,
.height = 22,
+ .charcount = 256,
.data = fontdata_sun12x22.data,
#ifdef __sparc__
.pref = 5,
diff --git a/lib/fonts/font_sun8x16.c b/lib/fonts/font_sun8x16.c
index 193fe6d988e0..81bb4eeae04e 100644
--- a/lib/fonts/font_sun8x16.c
+++ b/lib/fonts/font_sun8x16.c
@@ -3,7 +3,7 @@
#define FONTDATAMAX 4096
-static struct font_data fontdata_sun8x16 = {
+static const struct font_data fontdata_sun8x16 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
/* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00,
@@ -268,6 +268,7 @@ const struct font_desc font_sun_8x16 = {
.name = "SUN8x16",
.width = 8,
.height = 16,
+ .charcount = 256,
.data = fontdata_sun8x16.data,
#ifdef __sparc__
.pref = 10,
diff --git a/lib/fonts/font_ter16x32.c b/lib/fonts/font_ter16x32.c
index 91b9c283bd9c..5baedc573dd6 100644
--- a/lib/fonts/font_ter16x32.c
+++ b/lib/fonts/font_ter16x32.c
@@ -4,7 +4,7 @@
#define FONTDATAMAX 16384
-static struct font_data fontdata_ter16x32 = {
+static const struct font_data fontdata_ter16x32 = {
{ 0, 0, FONTDATAMAX, 0 }, {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
@@ -774,8 +774,8 @@ static struct font_data fontdata_ter16x32 = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 95 */
- 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00,
- 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1169,7 +1169,7 @@ static struct font_data fontdata_ter16x32 = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x7f, 0xf8, 0x7f, 0xfc, 0x03, 0x9e, 0x03, 0x8e,
+ 0x7e, 0xf8, 0x7f, 0xfc, 0x03, 0x9e, 0x03, 0x8e,
0x03, 0x8e, 0x3f, 0x8e, 0x7f, 0xfe, 0xf3, 0xfe,
0xe3, 0x80, 0xe3, 0x80, 0xe3, 0x80, 0xf3, 0xce,
0x7f, 0xfe, 0x3e, 0xfc, 0x00, 0x00, 0x00, 0x00,
@@ -2062,6 +2062,7 @@ const struct font_desc font_ter_16x32 = {
.name = "TER16x32",
.width = 16,
.height = 32,
+ .charcount = 256,
.data = fontdata_ter16x32.data,
#ifdef __sparc__
.pref = 5,
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 7f1244b5294a..dab97bb69df6 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -81,14 +81,14 @@ static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
* users set the same bit, one user will return remain bits, otherwise
* return 0.
*/
-static int bitmap_set_ll(unsigned long *map, int start, int nr)
+static int bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
unsigned long *p = map + BIT_WORD(start);
- const int size = start + nr;
+ const unsigned long size = start + nr;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
- while (nr - bits_to_set >= 0) {
+ while (nr >= bits_to_set) {
if (set_bits_ll(p, mask_to_set))
return nr;
nr -= bits_to_set;
@@ -116,14 +116,15 @@ static int bitmap_set_ll(unsigned long *map, int start, int nr)
* users clear the same bit, one user will return remain bits,
* otherwise return 0.
*/
-static int bitmap_clear_ll(unsigned long *map, int start, int nr)
+static unsigned long
+bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
unsigned long *p = map + BIT_WORD(start);
- const int size = start + nr;
+ const unsigned long size = start + nr;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
- while (nr - bits_to_clear >= 0) {
+ while (nr >= bits_to_clear) {
if (clear_bits_ll(p, mask_to_clear))
return nr;
nr -= bits_to_clear;
@@ -183,8 +184,8 @@ int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t ph
size_t size, int nid, void *owner)
{
struct gen_pool_chunk *chunk;
- int nbits = size >> pool->min_alloc_order;
- int nbytes = sizeof(struct gen_pool_chunk) +
+ unsigned long nbits = size >> pool->min_alloc_order;
+ unsigned long nbytes = sizeof(struct gen_pool_chunk) +
BITS_TO_LONGS(nbits) * sizeof(long);
chunk = vzalloc_node(nbytes, nid);
@@ -242,7 +243,7 @@ void gen_pool_destroy(struct gen_pool *pool)
struct list_head *_chunk, *_next_chunk;
struct gen_pool_chunk *chunk;
int order = pool->min_alloc_order;
- int bit, end_bit;
+ unsigned long bit, end_bit;
list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
@@ -278,7 +279,7 @@ unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
struct gen_pool_chunk *chunk;
unsigned long addr = 0;
int order = pool->min_alloc_order;
- int nbits, start_bit, end_bit, remain;
+ unsigned long nbits, start_bit, end_bit, remain;
#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
BUG_ON(in_nmi());
@@ -487,7 +488,7 @@ void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
{
struct gen_pool_chunk *chunk;
int order = pool->min_alloc_order;
- int start_bit, nbits, remain;
+ unsigned long start_bit, nbits, remain;
#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
BUG_ON(in_nmi());
@@ -755,7 +756,7 @@ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
index = bitmap_find_next_zero_area(map, size, start, nr, 0);
while (index < size) {
- int next_bit = find_next_bit(map, size, index + nr);
+ unsigned long next_bit = find_next_bit(map, size, index + nr);
if ((next_bit - index) < len) {
len = next_bit - index;
start_bit = index;
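A key detail in the bitmap_set_ll()/bitmap_clear_ll() changes above is that the old loop condition "nr - bits_to_set >= 0" only terminates correctly for a signed nr; once nr becomes unsigned long, the subtraction wraps instead of going negative, so the test has to be rewritten as "nr >= bits_to_set". A tiny sketch of the pitfall (hypothetical values, not the genalloc code):

#include <stdio.h>

int main(void)
{
        unsigned long nr = 3;           /* bits left to set */
        int bits_to_set = 8;            /* bits available in this word */

        /* With unsigned nr, nr - bits_to_set wraps to a huge value, so the
         * old-style test is always true and the loop would run too far. */
        if (nr - bits_to_set >= 0)
                printf("old test: still looping (wrong for unsigned nr)\n");

        /* The rewritten test compares directly and terminates correctly. */
        if (nr >= (unsigned long)bits_to_set)
                printf("new test: keep looping\n");
        else
                printf("new test: stop, only a partial word remains\n");

        return 0;
}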
diff --git a/lib/glob.c b/lib/glob.c
index 52e3ed7e4a9b..85ecbda45cd8 100644
--- a/lib/glob.c
+++ b/lib/glob.c
@@ -102,7 +102,7 @@ bool __pure glob_match(char const *pat, char const *str)
break;
case '\\':
d = *pat++;
- /* fall through */
+ fallthrough;
default: /* Literal character */
literal:
if (c == d) {
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 147133f8eb2f..9301578f98e8 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -7,6 +7,7 @@
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/export.h>
#include <asm/unaligned.h>
diff --git a/lib/idr.c b/lib/idr.c
index c2cf2c52bbde..f4ab4f4aa3c7 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -372,7 +372,8 @@ EXPORT_SYMBOL(idr_replace);
* Allocate an ID between @min and @max, inclusive. The allocated ID will
* not exceed %INT_MAX, even if @max is larger.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
@@ -470,6 +471,7 @@ alloc:
goto retry;
nospc:
xas_unlock_irqrestore(&xas, flags);
+ kfree(alloc);
return -ENOSPC;
}
EXPORT_SYMBOL(ida_alloc_range);
@@ -479,7 +481,8 @@ EXPORT_SYMBOL(ida_alloc_range);
* @ida: IDA handle.
* @id: Previously allocated ID.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
*/
void ida_free(struct ida *ida, unsigned int id)
{
@@ -531,7 +534,8 @@ EXPORT_SYMBOL(ida_free);
* or freed. If the IDA is already empty, there is no need to call this
* function.
*
- * Context: Any context.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
*/
void ida_destroy(struct ida *ida)
{
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 14cae2584db4..d8ca336ff9cd 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -2,6 +2,7 @@
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
+#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
@@ -71,8 +72,6 @@
__start.bi_bvec_done = skip; \
__start.bi_idx = 0; \
for_each_bvec(__v, i->bvec, __bi, __start) { \
- if (!__v.bv_len) \
- continue; \
(void)(STEP); \
} \
}
@@ -140,6 +139,8 @@
static int copyout(void __user *to, const void *from, size_t n)
{
+ if (should_fail_usercopy())
+ return n;
if (access_ok(to, n)) {
instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
@@ -149,6 +150,8 @@ static int copyout(void __user *to, const void *from, size_t n)
static int copyin(void *to, const void __user *from, size_t n)
{
+ if (should_fail_usercopy())
+ return n;
if (access_ok(from, n)) {
instrument_copy_from_user(to, from, n);
n = raw_copy_from_user(to, from, n);
@@ -587,14 +590,15 @@ static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
}
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
- __wsum *csum, struct iov_iter *i)
+ struct csum_state *csstate,
+ struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
unsigned int p_mask = pipe->ring_size - 1;
+ __wsum sum = csstate->csum;
+ size_t off = csstate->off;
unsigned int i_head;
size_t n, r;
- size_t off = 0;
- __wsum sum = *csum;
if (!sanity(i))
return 0;
@@ -616,7 +620,8 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
i_head++;
} while (n);
i->count -= bytes;
- *csum = sum;
+ csstate->csum = sum;
+ csstate->off = off;
return bytes;
}
@@ -1062,6 +1067,21 @@ static void pipe_advance(struct iov_iter *i, size_t size)
pipe_truncate(i);
}
+static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
+{
+ struct bvec_iter bi;
+
+ bi.bi_size = i->count;
+ bi.bi_bvec_done = i->iov_offset;
+ bi.bi_idx = 0;
+ bvec_iter_advance(i->bvec, &bi, size);
+
+ i->bvec += bi.bi_idx;
+ i->nr_segs -= bi.bi_idx;
+ i->count = bi.bi_size;
+ i->iov_offset = bi.bi_bvec_done;
+}
+
void iov_iter_advance(struct iov_iter *i, size_t size)
{
if (unlikely(iov_iter_is_pipe(i))) {
@@ -1072,6 +1092,10 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
i->count -= size;
return;
}
+ if (iov_iter_is_bvec(i)) {
+ iov_iter_bvec_advance(i, size);
+ return;
+ }
iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
@@ -1517,18 +1541,19 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
struct iov_iter *i)
{
+ struct csum_state *csstate = _csstate;
const char *from = addr;
- __wsum *csum = csump;
__wsum sum, next;
- size_t off = 0;
+ size_t off;
if (unlikely(iov_iter_is_pipe(i)))
- return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
+ return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
- sum = *csum;
+ sum = csstate->csum;
+ off = csstate->off;
if (unlikely(iov_iter_is_discard(i))) {
WARN_ON(1); /* for now */
return 0;
@@ -1556,7 +1581,8 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
off += v.iov_len;
})
)
- *csum = sum;
+ csstate->csum = sum;
+ csstate->off = off;
return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
@@ -1653,7 +1679,7 @@ static int copy_compat_iovec_from_user(struct iovec *iov,
(const struct compat_iovec __user *)uvec;
int ret = -EFAULT, i;
- if (!user_access_begin(uvec, nr_segs * sizeof(*uvec)))
+ if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
return -EFAULT;
for (i = 0; i < nr_segs; i++) {
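iov_iter_bvec_advance() above delegates to bvec_iter_advance() and then folds the iterator state back into the iov_iter. The underlying advance logic, consuming whole segments first and then recording the leftover offset into the current one, can be sketched stand-alone (hypothetical segment type, not the kernel bvec API):

#include <stddef.h>
#include <stdio.h>

struct seg { size_t len; };

struct cursor {
        size_t idx;     /* current segment */
        size_t done;    /* bytes already consumed within that segment */
};

/* Advance 'bytes' through an array of segments, skipping exhausted ones. */
static void advance(const struct seg *segs, size_t nsegs,
                    struct cursor *c, size_t bytes)
{
        while (bytes && c->idx < nsegs) {
                size_t avail = segs[c->idx].len - c->done;

                if (bytes < avail) {
                        c->done += bytes;
                        return;
                }
                bytes -= avail;
                c->idx++;       /* segment fully consumed, move on */
                c->done = 0;
        }
}

int main(void)
{
        struct seg segs[] = { {100}, {50}, {200} };
        struct cursor c = { 0, 0 };

        advance(segs, 3, &c, 120);      /* ends 20 bytes into segment 1 */
        printf("idx=%zu done=%zu\n", c.idx, c.done);
        advance(segs, 3, &c, 30);       /* crosses into segment 2 */
        printf("idx=%zu done=%zu\n", c.idx, c.done);
        return 0;
}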
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index a14ccf905055..a118b0b1e9b2 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -355,6 +355,7 @@ int kstrtobool(const char *s, bool *res)
default:
break;
}
+ break;
default:
break;
}
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 00909e6a2443..0b5dfb001bac 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -4,6 +4,7 @@
menuconfig KUNIT
tristate "KUnit - Enable support for unit tests"
+ select GLOB if KUNIT=y
help
Enables support for kernel unit tests (KUnit), a lightweight unit
testing and mocking framework for the Linux kernel. These tests are
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index 724b94311ca3..c49f4ffb6273 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -3,7 +3,8 @@ obj-$(CONFIG_KUNIT) += kunit.o
kunit-objs += test.o \
string-stream.o \
assert.o \
- try-catch.o
+ try-catch.o \
+ executor.o
ifeq ($(CONFIG_KUNIT_DEBUGFS),y)
kunit-objs += debugfs.o
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
index 33acdaa28a7d..e0ec7d6fed6f 100644
--- a/lib/kunit/assert.c
+++ b/lib/kunit/assert.c
@@ -85,6 +85,29 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
}
EXPORT_SYMBOL_GPL(kunit_ptr_not_err_assert_format);
+/* Checks if `text` is a literal representing `value`, e.g. "5" and 5 */
+static bool is_literal(struct kunit *test, const char *text, long long value,
+ gfp_t gfp)
+{
+ char *buffer;
+ int len;
+ bool ret;
+
+ len = snprintf(NULL, 0, "%lld", value);
+ if (strlen(text) != len)
+ return false;
+
+ buffer = kunit_kmalloc(test, len+1, gfp);
+ if (!buffer)
+ return false;
+
+ snprintf(buffer, len+1, "%lld", value);
+ ret = strncmp(buffer, text, len) == 0;
+
+ kunit_kfree(test, buffer);
+ return ret;
+}
+
void kunit_binary_assert_format(const struct kunit_assert *assert,
struct string_stream *stream)
{
@@ -97,12 +120,16 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
binary_assert->left_text,
binary_assert->operation,
binary_assert->right_text);
- string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld\n",
- binary_assert->left_text,
- binary_assert->left_value);
- string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld",
- binary_assert->right_text,
- binary_assert->right_value);
+ if (!is_literal(stream->test, binary_assert->left_text,
+ binary_assert->left_value, stream->gfp))
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld\n",
+ binary_assert->left_text,
+ binary_assert->left_value);
+ if (!is_literal(stream->test, binary_assert->right_text,
+ binary_assert->right_value, stream->gfp))
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld",
+ binary_assert->right_text,
+ binary_assert->right_value);
kunit_assert_print_msg(assert, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_assert_format);
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
new file mode 100644
index 000000000000..15832ed44668
--- /dev/null
+++ b/lib/kunit/executor.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/test.h>
+#include <linux/glob.h>
+#include <linux/moduleparam.h>
+
+/*
+ * These symbols point to the .kunit_test_suites section and are defined in
+ * include/asm-generic/vmlinux.lds.h, and consequently must be extern.
+ */
+extern struct kunit_suite * const * const __kunit_suites_start[];
+extern struct kunit_suite * const * const __kunit_suites_end[];
+
+#if IS_BUILTIN(CONFIG_KUNIT)
+
+static char *filter_glob;
+module_param(filter_glob, charp, 0);
+MODULE_PARM_DESC(filter_glob,
+ "Filter which KUnit test suites run at boot-time, e.g. list*");
+
+static struct kunit_suite * const *
+kunit_filter_subsuite(struct kunit_suite * const * const subsuite)
+{
+ int i, n = 0;
+ struct kunit_suite **filtered;
+
+ n = 0;
+ for (i = 0; subsuite[i] != NULL; ++i) {
+ if (glob_match(filter_glob, subsuite[i]->name))
+ ++n;
+ }
+
+ if (n == 0)
+ return NULL;
+
+ filtered = kmalloc_array(n + 1, sizeof(*filtered), GFP_KERNEL);
+ if (!filtered)
+ return NULL;
+
+ n = 0;
+ for (i = 0; subsuite[i] != NULL; ++i) {
+ if (glob_match(filter_glob, subsuite[i]->name))
+ filtered[n++] = subsuite[i];
+ }
+ filtered[n] = NULL;
+
+ return filtered;
+}
+
+struct suite_set {
+ struct kunit_suite * const * const *start;
+ struct kunit_suite * const * const *end;
+};
+
+static struct suite_set kunit_filter_suites(void)
+{
+ int i;
+ struct kunit_suite * const **copy, * const *filtered_subsuite;
+ struct suite_set filtered;
+
+ const size_t max = __kunit_suites_end - __kunit_suites_start;
+
+ if (!filter_glob) {
+ filtered.start = __kunit_suites_start;
+ filtered.end = __kunit_suites_end;
+ return filtered;
+ }
+
+ copy = kmalloc_array(max, sizeof(*filtered.start), GFP_KERNEL);
+ filtered.start = copy;
+ if (!copy) { /* won't be able to run anything, return an empty set */
+ filtered.end = copy;
+ return filtered;
+ }
+
+ for (i = 0; i < max; ++i) {
+ filtered_subsuite = kunit_filter_subsuite(__kunit_suites_start[i]);
+ if (filtered_subsuite)
+ *copy++ = filtered_subsuite;
+ }
+ filtered.end = copy;
+ return filtered;
+}
+
+static void kunit_print_tap_header(struct suite_set *suite_set)
+{
+ struct kunit_suite * const * const *suites, * const *subsuite;
+ int num_of_suites = 0;
+
+ for (suites = suite_set->start; suites < suite_set->end; suites++)
+ for (subsuite = *suites; *subsuite != NULL; subsuite++)
+ num_of_suites++;
+
+ pr_info("TAP version 14\n");
+ pr_info("1..%d\n", num_of_suites);
+}
+
+int kunit_run_all_tests(void)
+{
+ struct kunit_suite * const * const *suites;
+
+ struct suite_set suite_set = kunit_filter_suites();
+
+ kunit_print_tap_header(&suite_set);
+
+ for (suites = suite_set.start; suites < suite_set.end; suites++)
+ __kunit_test_suites_init(*suites);
+
+ if (filter_glob) { /* a copy was made of each array */
+ for (suites = suite_set.start; suites < suite_set.end; suites++)
+ kfree(*suites);
+ kfree(suite_set.start);
+ }
+
+ return 0;
+}
+
+#endif /* IS_BUILTIN(CONFIG_KUNIT) */
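kunit_filter_subsuite() above uses the usual two-pass pattern for filtering a NULL-terminated pointer array: count the matches, allocate n + 1 slots, then copy and terminate. A stand-alone sketch of the same pattern, using fnmatch() as a stand-in for the kernel's glob_match() (names and test data are made up):

#include <fnmatch.h>
#include <stdio.h>
#include <stdlib.h>

/* Filter a NULL-terminated array of names, keeping those matching "pat". */
static const char **filter_names(const char *pat, const char * const *names)
{
        size_t i, n = 0;
        const char **out;

        for (i = 0; names[i]; i++)
                if (fnmatch(pat, names[i], 0) == 0)
                        n++;
        if (n == 0)
                return NULL;

        out = malloc((n + 1) * sizeof(*out));
        if (!out)
                return NULL;

        n = 0;
        for (i = 0; names[i]; i++)
                if (fnmatch(pat, names[i], 0) == 0)
                        out[n++] = names[i];
        out[n] = NULL;          /* keep the result NULL-terminated */

        return out;
}

int main(void)
{
        const char * const suites[] = { "list-kunit-test", "kunit-try-catch-test",
                                        "string-stream-test", NULL };
        const char **kept = filter_names("list*", suites);
        size_t i;

        for (i = 0; kept && kept[i]; i++)
                printf("matched: %s\n", kept[i]);
        free(kept);
        return 0;
}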
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index dcc35fd30d95..ec9494e914ef 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -16,16 +16,6 @@
#include "string-stream.h"
#include "try-catch-impl.h"
-static void kunit_print_tap_version(void)
-{
- static bool kunit_has_printed_tap_version;
-
- if (!kunit_has_printed_tap_version) {
- pr_info("TAP version 14\n");
- kunit_has_printed_tap_version = true;
- }
-}
-
/*
* Append formatted message to log, size of which is limited to
* KUNIT_LOG_SIZE bytes (including null terminating byte).
@@ -65,7 +55,6 @@ EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
static void kunit_print_subtest_start(struct kunit_suite *suite)
{
- kunit_print_tap_version();
kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s",
suite->name);
kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "1..%zd",
@@ -336,39 +325,72 @@ static void kunit_catch_run_case(void *data)
* occur in a test case and reports them as failures.
*/
static void kunit_run_case_catch_errors(struct kunit_suite *suite,
- struct kunit_case *test_case)
+ struct kunit_case *test_case,
+ struct kunit *test)
{
struct kunit_try_catch_context context;
struct kunit_try_catch *try_catch;
- struct kunit test;
- kunit_init_test(&test, test_case->name, test_case->log);
- try_catch = &test.try_catch;
+ kunit_init_test(test, test_case->name, test_case->log);
+ try_catch = &test->try_catch;
kunit_try_catch_init(try_catch,
- &test,
+ test,
kunit_try_run_case,
kunit_catch_run_case);
- context.test = &test;
+ context.test = test;
context.suite = suite;
context.test_case = test_case;
kunit_try_catch_run(try_catch, &context);
- test_case->success = test.success;
-
- kunit_print_ok_not_ok(&test, true, test_case->success,
- kunit_test_case_num(suite, test_case),
- test_case->name);
+ test_case->success = test->success;
}
int kunit_run_tests(struct kunit_suite *suite)
{
+ char param_desc[KUNIT_PARAM_DESC_SIZE];
struct kunit_case *test_case;
kunit_print_subtest_start(suite);
- kunit_suite_for_each_test_case(suite, test_case)
- kunit_run_case_catch_errors(suite, test_case);
+ kunit_suite_for_each_test_case(suite, test_case) {
+ struct kunit test = { .param_value = NULL, .param_index = 0 };
+ bool test_success = true;
+
+ if (test_case->generate_params) {
+ /* Get initial param. */
+ param_desc[0] = '\0';
+ test.param_value = test_case->generate_params(NULL, param_desc);
+ }
+
+ do {
+ kunit_run_case_catch_errors(suite, test_case, &test);
+ test_success &= test_case->success;
+
+ if (test_case->generate_params) {
+ if (param_desc[0] == '\0') {
+ snprintf(param_desc, sizeof(param_desc),
+ "param-%d", test.param_index);
+ }
+
+ kunit_log(KERN_INFO, &test,
+ KUNIT_SUBTEST_INDENT
+ "# %s: %s %d - %s",
+ test_case->name,
+ kunit_status_to_string(test.success),
+ test.param_index + 1, param_desc);
+
+ /* Get next param. */
+ param_desc[0] = '\0';
+ test.param_value = test_case->generate_params(test.param_value, param_desc);
+ test.param_index++;
+ }
+ } while (test.param_value);
+
+ kunit_print_ok_not_ok(&test, true, test_success,
+ kunit_test_case_num(suite, test_case),
+ test_case->name);
+ }
kunit_print_subtest_end(suite);
@@ -381,7 +403,7 @@ static void kunit_init_suite(struct kunit_suite *suite)
kunit_debugfs_create_suite(suite);
}
-int __kunit_test_suites_init(struct kunit_suite **suites)
+int __kunit_test_suites_init(struct kunit_suite * const * const suites)
{
unsigned int i;
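The new parameterised-test loop above follows a simple generator protocol: generate_params(NULL, ...) produces the first parameter, each later call receives the previous value, and a NULL return ends the sequence. A stand-alone sketch of that protocol with a hypothetical integer generator (made-up names, not the KUnit API):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical parameter generator: returns the next test parameter, or
 * NULL when the sequence is exhausted (the first call gets prev == NULL). */
static const int *next_param(const int *prev)
{
        static const int params[] = { 1, 2, 3 };

        if (!prev)
                return &params[0];
        if (prev == &params[2])
                return NULL;
        return prev + 1;
}

static int run_one_case(int value)
{
        return value != 2;      /* pretend the case with value 2 fails */
}

int main(void)
{
        const int *param = next_param(NULL);
        int all_ok = 1, index = 0;

        /* Same shape as the parameterised do/while loop in kunit_run_tests(). */
        do {
                int ok = run_one_case(*param);

                all_ok &= ok;
                printf("# case %d (param %d): %s\n", ++index, *param,
                       ok ? "ok" : "not ok");
                param = next_param(param);
        } while (param);

        printf("%s overall\n", all_ok ? "ok" : "not ok");
        return 0;
}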
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 77ab839644c5..5ca0d815a95d 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -12,7 +12,7 @@
* pages = {},
* month = {June},
*}
- * Used by the iSCSI driver, possibly others, and derived from the
+ * Used by the iSCSI driver, possibly others, and derived from
* the iscsi-crc.c module of the linux-iscsi driver at
* http://linux-iscsi.sourceforge.net.
*
diff --git a/lib/linear_ranges.c b/lib/linear_ranges.c
index 9495ef3572b7..ced5c15d3f04 100644
--- a/lib/linear_ranges.c
+++ b/lib/linear_ranges.c
@@ -128,7 +128,7 @@ EXPORT_SYMBOL_GPL(linear_range_get_value_array);
* @selector: address where found selector value is updated
* @found: flag to indicate that given value was in the range
*
- * Return selector which which range value is closest match for given
+ * Return selector for which range value is closest match for given
* input value. Value is matching if it is equal or smaller than given
* value. If given value is in the range, then @found is set true.
*
@@ -168,11 +168,11 @@ EXPORT_SYMBOL_GPL(linear_range_get_selector_low);
* @selector: address where found selector value is updated
* @found: flag to indicate that given value was in the range
*
- * Scan array of ranges for selector which which range value matches given
+ * Scan array of ranges for selector for which range value matches given
* input value. Value is matching if it is equal or smaller than given
* value. If given value is found to be in a range scanning is stopped and
* @found is set true. If a range with values smaller than given value is found
- * but the range max is being smaller than given value, then the ranges
+ * but the range max is being smaller than given value, then the range's
* biggest selector is updated to @selector but scanning ranges is continued
* and @found is set to false.
*
@@ -209,7 +209,7 @@ EXPORT_SYMBOL_GPL(linear_range_get_selector_low_array);
* @selector: address where found selector value is updated
* @found: flag to indicate that given value was in the range
*
- * Return selector which which range value is closest match for given
+ * Return selector for which range value is closest match for given
* input value. Value is matching if it is equal or higher than given
* value. If given value is in the range, then @found is set true.
*
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index a899b3f0e2e5..2d85abac1744 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -15,6 +15,7 @@
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>
@@ -23,6 +24,7 @@
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/rtmutex.h>
+#include <linux/local_lock.h>
/*
* Change this to 1 if you want to see the failure printouts:
@@ -50,6 +52,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
#define LOCKTYPE_RWSEM 0x8
#define LOCKTYPE_WW 0x10
#define LOCKTYPE_RTMUTEX 0x20
+#define LOCKTYPE_LL 0x40
static struct ww_acquire_ctx t, t2;
static struct ww_mutex o, o2, o3;
@@ -58,10 +61,13 @@ static struct ww_mutex o, o2, o3;
* Normal standalone locks, for the circular and irq-context
* dependency tests:
*/
-static DEFINE_RAW_SPINLOCK(lock_A);
-static DEFINE_RAW_SPINLOCK(lock_B);
-static DEFINE_RAW_SPINLOCK(lock_C);
-static DEFINE_RAW_SPINLOCK(lock_D);
+static DEFINE_SPINLOCK(lock_A);
+static DEFINE_SPINLOCK(lock_B);
+static DEFINE_SPINLOCK(lock_C);
+static DEFINE_SPINLOCK(lock_D);
+
+static DEFINE_RAW_SPINLOCK(raw_lock_A);
+static DEFINE_RAW_SPINLOCK(raw_lock_B);
static DEFINE_RWLOCK(rwlock_A);
static DEFINE_RWLOCK(rwlock_B);
@@ -93,12 +99,12 @@ static DEFINE_RT_MUTEX(rtmutex_D);
* but X* and Y* are different classes. We do this so that
* we do not trigger a real lockup:
*/
-static DEFINE_RAW_SPINLOCK(lock_X1);
-static DEFINE_RAW_SPINLOCK(lock_X2);
-static DEFINE_RAW_SPINLOCK(lock_Y1);
-static DEFINE_RAW_SPINLOCK(lock_Y2);
-static DEFINE_RAW_SPINLOCK(lock_Z1);
-static DEFINE_RAW_SPINLOCK(lock_Z2);
+static DEFINE_SPINLOCK(lock_X1);
+static DEFINE_SPINLOCK(lock_X2);
+static DEFINE_SPINLOCK(lock_Y1);
+static DEFINE_SPINLOCK(lock_Y2);
+static DEFINE_SPINLOCK(lock_Z1);
+static DEFINE_SPINLOCK(lock_Z2);
static DEFINE_RWLOCK(rwlock_X1);
static DEFINE_RWLOCK(rwlock_X2);
@@ -132,16 +138,18 @@ static DEFINE_RT_MUTEX(rtmutex_Z2);
#endif
+static local_lock_t local_A = INIT_LOCAL_LOCK(local_A);
+
/*
* non-inlined runtime initializers, to let separate locks share
* the same lock-class:
*/
#define INIT_CLASS_FUNC(class) \
static noinline void \
-init_class_##class(raw_spinlock_t *lock, rwlock_t *rwlock, \
+init_class_##class(spinlock_t *lock, rwlock_t *rwlock, \
struct mutex *mutex, struct rw_semaphore *rwsem)\
{ \
- raw_spin_lock_init(lock); \
+ spin_lock_init(lock); \
rwlock_init(rwlock); \
mutex_init(mutex); \
init_rwsem(rwsem); \
@@ -210,10 +218,10 @@ static void init_shared_classes(void)
* Shortcuts for lock/unlock API variants, to keep
* the testcases compact:
*/
-#define L(x) raw_spin_lock(&lock_##x)
-#define U(x) raw_spin_unlock(&lock_##x)
+#define L(x) spin_lock(&lock_##x)
+#define U(x) spin_unlock(&lock_##x)
#define LU(x) L(x); U(x)
-#define SI(x) raw_spin_lock_init(&lock_##x)
+#define SI(x) spin_lock_init(&lock_##x)
#define WL(x) write_lock(&rwlock_##x)
#define WU(x) write_unlock(&rwlock_##x)
@@ -1305,19 +1313,23 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map)
+# define I_RAW_SPINLOCK(x) lockdep_reset_lock(&raw_lock_##x.dep_map)
# define I_RWLOCK(x) lockdep_reset_lock(&rwlock_##x.dep_map)
# define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
# define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
# define I_WW(x) lockdep_reset_lock(&x.dep_map)
+# define I_LOCAL_LOCK(x) lockdep_reset_lock(&local_##x.dep_map)
#ifdef CONFIG_RT_MUTEXES
# define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map)
#endif
#else
# define I_SPINLOCK(x)
+# define I_RAW_SPINLOCK(x)
# define I_RWLOCK(x)
# define I_MUTEX(x)
# define I_RWSEM(x)
# define I_WW(x)
+# define I_LOCAL_LOCK(x)
#endif
#ifndef I_RTMUTEX
@@ -1341,7 +1353,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
#define I2(x) \
do { \
- raw_spin_lock_init(&lock_##x); \
+ spin_lock_init(&lock_##x); \
rwlock_init(&rwlock_##x); \
mutex_init(&mutex_##x); \
init_rwsem(&rwsem_##x); \
@@ -1357,9 +1369,16 @@ static void reset_locks(void)
I1(A); I1(B); I1(C); I1(D);
I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
I_WW(t); I_WW(t2); I_WW(o.base); I_WW(o2.base); I_WW(o3.base);
+ I_RAW_SPINLOCK(A); I_RAW_SPINLOCK(B);
+ I_LOCAL_LOCK(A);
+
lockdep_reset();
+
I2(A); I2(B); I2(C); I2(D);
init_shared_classes();
+ raw_spin_lock_init(&raw_lock_A);
+ raw_spin_lock_init(&raw_lock_B);
+ local_lock_init(&local_A);
ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
@@ -1381,6 +1400,8 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
WARN_ON(irqs_disabled());
+ debug_locks_silent = !(debug_locks_verbose & lockclass_mask);
+
testcase_fn();
/*
* Filter out expected failures:
@@ -1401,7 +1422,7 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
}
testcase_total++;
- if (debug_locks_verbose)
+ if (debug_locks_verbose & lockclass_mask)
pr_cont(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
lockclass_mask, debug_locks, expected);
/*
@@ -2005,10 +2026,23 @@ static void ww_test_edeadlk_acquire_wrong_slow(void)
static void ww_test_spin_nest_unlocked(void)
{
- raw_spin_lock_nest_lock(&lock_A, &o.base);
+ spin_lock_nest_lock(&lock_A, &o.base);
U(A);
}
+/* This is not a deadlock, because we have X1 to serialize Y1 and Y2 */
+static void ww_test_spin_nest_lock(void)
+{
+ spin_lock(&lock_X1);
+ spin_lock_nest_lock(&lock_Y1, &lock_X1);
+ spin_lock(&lock_A);
+ spin_lock_nest_lock(&lock_Y2, &lock_X1);
+ spin_unlock(&lock_A);
+ spin_unlock(&lock_Y2);
+ spin_unlock(&lock_Y1);
+ spin_unlock(&lock_X1);
+}
+
static void ww_test_unneeded_slow(void)
{
WWAI(&t);
@@ -2226,6 +2260,10 @@ static void ww_tests(void)
dotest(ww_test_spin_nest_unlocked, FAILURE, LOCKTYPE_WW);
pr_cont("\n");
+ print_testname("spinlock nest test");
+ dotest(ww_test_spin_nest_lock, SUCCESS, LOCKTYPE_WW);
+ pr_cont("\n");
+
printk(" -----------------------------------------------------\n");
printk(" |block | try |context|\n");
printk(" -----------------------------------------------------\n");
@@ -2357,6 +2395,355 @@ static void queued_read_lock_tests(void)
pr_cont("\n");
}
+static void fs_reclaim_correct_nesting(void)
+{
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_alloc(GFP_NOFS);
+ fs_reclaim_release(GFP_KERNEL);
+}
+
+static void fs_reclaim_wrong_nesting(void)
+{
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_alloc(GFP_KERNEL);
+ fs_reclaim_release(GFP_KERNEL);
+}
+
+static void fs_reclaim_protected_nesting(void)
+{
+ unsigned int flags;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ flags = memalloc_nofs_save();
+ might_alloc(GFP_KERNEL);
+ memalloc_nofs_restore(flags);
+ fs_reclaim_release(GFP_KERNEL);
+}
+
+static void fs_reclaim_tests(void)
+{
+ printk(" --------------------\n");
+ printk(" | fs_reclaim tests |\n");
+ printk(" --------------------\n");
+
+ print_testname("correct nesting");
+ dotest(fs_reclaim_correct_nesting, SUCCESS, 0);
+ pr_cont("\n");
+
+ print_testname("wrong nesting");
+ dotest(fs_reclaim_wrong_nesting, FAILURE, 0);
+ pr_cont("\n");
+
+ print_testname("protected nesting");
+ dotest(fs_reclaim_protected_nesting, SUCCESS, 0);
+ pr_cont("\n");
+}
+
+#define __guard(cleanup) __maybe_unused __attribute__((__cleanup__(cleanup)))
+
+static void hardirq_exit(int *_)
+{
+ HARDIRQ_EXIT();
+}
+
+#define HARDIRQ_CONTEXT(name, ...) \
+ int hardirq_guard_##name __guard(hardirq_exit); \
+ HARDIRQ_ENTER();
+
+#define NOTTHREADED_HARDIRQ_CONTEXT(name, ...) \
+ int notthreaded_hardirq_guard_##name __guard(hardirq_exit); \
+ local_irq_disable(); \
+ __irq_enter(); \
+ WARN_ON(!in_irq());
+
+static void softirq_exit(int *_)
+{
+ SOFTIRQ_EXIT();
+}
+
+#define SOFTIRQ_CONTEXT(name, ...) \
+ int softirq_guard_##name __guard(softirq_exit); \
+ SOFTIRQ_ENTER();
+
+static void rcu_exit(int *_)
+{
+ rcu_read_unlock();
+}
+
+#define RCU_CONTEXT(name, ...) \
+ int rcu_guard_##name __guard(rcu_exit); \
+ rcu_read_lock();
+
+static void rcu_bh_exit(int *_)
+{
+ rcu_read_unlock_bh();
+}
+
+#define RCU_BH_CONTEXT(name, ...) \
+ int rcu_bh_guard_##name __guard(rcu_bh_exit); \
+ rcu_read_lock_bh();
+
+static void rcu_sched_exit(int *_)
+{
+ rcu_read_unlock_sched();
+}
+
+#define RCU_SCHED_CONTEXT(name, ...) \
+ int rcu_sched_guard_##name __guard(rcu_sched_exit); \
+ rcu_read_lock_sched();
+
+static void rcu_callback_exit(int *_)
+{
+ rcu_lock_release(&rcu_callback_map);
+}
+
+#define RCU_CALLBACK_CONTEXT(name, ...) \
+ int rcu_callback_guard_##name __guard(rcu_callback_exit); \
+ rcu_lock_acquire(&rcu_callback_map);
+
+
+static void raw_spinlock_exit(raw_spinlock_t **lock)
+{
+ raw_spin_unlock(*lock);
+}
+
+#define RAW_SPINLOCK_CONTEXT(name, lock) \
+ raw_spinlock_t *raw_spinlock_guard_##name __guard(raw_spinlock_exit) = &(lock); \
+ raw_spin_lock(&(lock));
+
+static void spinlock_exit(spinlock_t **lock)
+{
+ spin_unlock(*lock);
+}
+
+#define SPINLOCK_CONTEXT(name, lock) \
+ spinlock_t *spinlock_guard_##name __guard(spinlock_exit) = &(lock); \
+ spin_lock(&(lock));
+
+static void mutex_exit(struct mutex **lock)
+{
+ mutex_unlock(*lock);
+}
+
+#define MUTEX_CONTEXT(name, lock) \
+ struct mutex *mutex_guard_##name __guard(mutex_exit) = &(lock); \
+ mutex_lock(&(lock));
+
+#define GENERATE_2_CONTEXT_TESTCASE(outer, outer_lock, inner, inner_lock) \
+ \
+static void __maybe_unused inner##_in_##outer(void) \
+{ \
+ outer##_CONTEXT(_, outer_lock); \
+ { \
+ inner##_CONTEXT(_, inner_lock); \
+ } \
+}
+
+/*
+ * wait contexts (considering PREEMPT_RT)
+ *
+ * o: inner is allowed in outer
+ * x: inner is disallowed in outer
+ *
+ * \ inner | RCU | RAW_SPIN | SPIN | MUTEX
+ * outer \ | | | |
+ * ---------------+-------+----------+------+-------
+ * HARDIRQ | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * NOTTHREADED_IRQ| o | o | x | x
+ * ---------------+-------+----------+------+-------
+ * SOFTIRQ | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * RCU | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * RCU_BH | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * RCU_CALLBACK | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * RCU_SCHED | o | o | x | x
+ * ---------------+-------+----------+------+-------
+ * RAW_SPIN | o | o | x | x
+ * ---------------+-------+----------+------+-------
+ * SPIN | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * MUTEX | o | o | o | o
+ * ---------------+-------+----------+------+-------
+ */
+
+#define GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(HARDIRQ, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(NOTTHREADED_HARDIRQ, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(RCU_CALLBACK, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(MUTEX, mutex_A, inner, inner_lock)
+
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RCU, )
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RAW_SPINLOCK, raw_lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(SPINLOCK, lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(MUTEX, mutex_B)
+
+/* the outer context allows all kinds of preemption */
+#define DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(outer) \
+ dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(MUTEX_in_##outer, SUCCESS, LOCKTYPE_MUTEX); \
+
+/*
+ * the outer context only allows the preemption introduced by spinlock_t (which
+ * is a sleepable lock for PREEMPT_RT)
+ */
+#define DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(outer) \
+ dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
+
+/* the outer doesn't allow any kind of preemption */
+#define DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(outer) \
+ dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(SPINLOCK_in_##outer, FAILURE, LOCKTYPE_SPIN); \
+ dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
+
+static void wait_context_tests(void)
+{
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | wait context tests |\n");
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | rcu | raw | spin |mutex |\n");
+ printk(" --------------------------------------------------------------------------\n");
+ print_testname("in hardirq context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(HARDIRQ);
+ pr_cont("\n");
+
+ print_testname("in hardirq context (not threaded)");
+ DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(NOTTHREADED_HARDIRQ);
+ pr_cont("\n");
+
+ print_testname("in softirq context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SOFTIRQ);
+ pr_cont("\n");
+
+ print_testname("in RCU context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU);
+ pr_cont("\n");
+
+ print_testname("in RCU-bh context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_BH);
+ pr_cont("\n");
+
+ print_testname("in RCU callback context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_CALLBACK);
+ pr_cont("\n");
+
+ print_testname("in RCU-sched context");
+ DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RCU_SCHED);
+ pr_cont("\n");
+
+ print_testname("in RAW_SPINLOCK context");
+ DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RAW_SPINLOCK);
+ pr_cont("\n");
+
+ print_testname("in SPINLOCK context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SPINLOCK);
+ pr_cont("\n");
+
+ print_testname("in MUTEX context");
+ DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(MUTEX);
+ pr_cont("\n");
+}
+
+static void local_lock_2(void)
+{
+ local_lock_acquire(&local_A); /* IRQ-ON */
+ local_lock_release(&local_A);
+
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A); /* IN-IRQ */
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT()
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */
+ local_lock_release(&local_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+}
+
+static void local_lock_3A(void)
+{
+ local_lock_acquire(&local_A); /* IRQ-ON */
+ spin_lock(&lock_B); /* IRQ-ON */
+ spin_unlock(&lock_B);
+ local_lock_release(&local_A);
+
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A); /* IN-IRQ */
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT()
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+ local_lock_release(&local_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+}
+
+static void local_lock_3B(void)
+{
+ local_lock_acquire(&local_A); /* IRQ-ON */
+ spin_lock(&lock_B); /* IRQ-ON */
+ spin_unlock(&lock_B);
+ local_lock_release(&local_A);
+
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A); /* IN-IRQ */
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT()
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+ local_lock_release(&local_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ spin_lock(&lock_B); /* IN-IRQ <-> IRQ-ON cycle, true */
+ spin_unlock(&lock_B);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+
+}
+
+static void local_lock_tests(void)
+{
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | local_lock tests |\n");
+ printk(" ---------------------\n");
+
+ print_testname("local_lock inversion 2");
+ dotest(local_lock_2, SUCCESS, LOCKTYPE_LL);
+ pr_cont("\n");
+
+ print_testname("local_lock inversion 3A");
+ dotest(local_lock_3A, SUCCESS, LOCKTYPE_LL);
+ pr_cont("\n");
+
+ print_testname("local_lock inversion 3B");
+ dotest(local_lock_3B, FAILURE, LOCKTYPE_LL);
+ pr_cont("\n");
+}
+
void locking_selftest(void)
{
/*
@@ -2384,7 +2771,6 @@ void locking_selftest(void)
printk(" --------------------------------------------------------------------------\n");
init_shared_classes();
- debug_locks_silent = !debug_locks_verbose;
lockdep_set_selftest_task(current);
DO_TESTCASE_6R("A-A deadlock", AA);
@@ -2478,6 +2864,14 @@ void locking_selftest(void)
if (IS_ENABLED(CONFIG_QUEUED_RWLOCKS))
queued_read_lock_tests();
+ fs_reclaim_tests();
+
+ /* Wait context test cases that are specific for RAW_LOCK_NESTING */
+ if (IS_ENABLED(CONFIG_PROVE_RAW_LOCK_NESTING))
+ wait_context_tests();
+
+ local_lock_tests();
+
if (unexpected_testcase_failures) {
printk("-----------------------------------------------------------------\n");
debug_locks = 0;
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 00cb0d0b73e1..8a7724a6ce2f 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -263,7 +263,11 @@ static FORCE_INLINE int LZ4_decompress_generic(
}
}
- LZ4_memcpy(op, ip, length);
+ /*
+ * supports overlapping memory regions; only matters
+ * for in-place decompression scenarios
+ */
+ LZ4_memmove(op, ip, length);
ip += length;
op += length;
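A tiny plain-C sketch (outside the kernel, illustrative only) of why the switch to LZ4_memmove() matters: for in-place decompression the input and output live in the same buffer, so the copy regions can overlap, which memcpy() does not permit:

	#include <string.h>

	static void overlap_copy_example(void)
	{
		char buf[16] = "abcdefgh";

		memmove(buf + 4, buf, 8);	/* safe: handles the 4-byte overlap */
		/* memcpy(buf + 4, buf, 8);	   would be undefined behaviour here */
	}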
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index c91dd96ef629..673bd206aa98 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -146,6 +146,7 @@ static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
* environments. This is needed when decompressing the Linux Kernel, for example.
*/
#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
+#define LZ4_memmove(dst, src, size) __builtin_memmove(dst, src, size)
static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
{
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index 8ad5ba2b86e2..76758e9296ba 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -301,7 +301,7 @@ finished_writing_instruction:
return in_end - (ii - ti);
}
-int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
+static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len,
void *wrkmem, const unsigned char bitstream_version)
{
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 3952a07130d8..064d68a5391a 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -18,9 +18,11 @@
* or by defining a preprocessor macro in arch/include/asm/div64.h.
*/
+#include <linux/bitops.h>
#include <linux/export.h>
-#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/math64.h>
+#include <linux/log2.h>
/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32
diff --git a/lib/math/int_pow.c b/lib/math/int_pow.c
index 622fc1ab3c74..0cf426e69bda 100644
--- a/lib/math/int_pow.c
+++ b/lib/math/int_pow.c
@@ -6,7 +6,7 @@
*/
#include <linux/export.h>
-#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/types.h>
/**
diff --git a/lib/math/int_sqrt.c b/lib/math/int_sqrt.c
index 30e0f9770f88..a8170bb9142f 100644
--- a/lib/math/int_sqrt.c
+++ b/lib/math/int_sqrt.c
@@ -6,9 +6,10 @@
* square root from Guy L. Steele.
*/
-#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/bitops.h>
+#include <linux/limits.h>
+#include <linux/math.h>
/**
* int_sqrt - computes the integer square root
diff --git a/lib/math/rational.c b/lib/math/rational.c
index df75c8809693..9781d521963d 100644
--- a/lib/math/rational.c
+++ b/lib/math/rational.c
@@ -11,7 +11,7 @@
#include <linux/rational.h>
#include <linux/compiler.h>
#include <linux/export.h>
-#include <linux/kernel.h>
+#include <linux/minmax.h>
/*
* calculate best rational approximation for a given fraction
diff --git a/lib/math/reciprocal_div.c b/lib/math/reciprocal_div.c
index bf043258fa00..6cb4adbb81d2 100644
--- a/lib/math/reciprocal_div.c
+++ b/lib/math/reciprocal_div.c
@@ -1,9 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bitops.h>
#include <linux/bug.h>
-#include <linux/kernel.h>
-#include <asm/div64.h>
-#include <linux/reciprocal_div.h>
#include <linux/export.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/types.h>
+
+#include <linux/reciprocal_div.h>
/*
* For a description of the algorithm please have a look at
diff --git a/lib/mpi/ec.c b/lib/mpi/ec.c
index c21470122dfc..40f5908e57a4 100644
--- a/lib/mpi/ec.c
+++ b/lib/mpi/ec.c
@@ -1252,7 +1252,6 @@ void mpi_ec_mul_point(MPI_POINT result,
MPI_POINT q1, q2, prd, sum;
unsigned long sw;
mpi_size_t rsize;
- int scalar_copied = 0;
/* Compute scalar point multiplication with Montgomery Ladder.
* Note that we don't use Y-coordinate in the points at all.
@@ -1314,8 +1313,6 @@ void mpi_ec_mul_point(MPI_POINT result,
point_free(&p2);
point_free(&p1_);
point_free(&p2_);
- if (scalar_copied)
- mpi_free(scalar);
return;
}
diff --git a/lib/mpi/mpi-bit.c b/lib/mpi/mpi-bit.c
index a5119a2bcdd4..142b680835df 100644
--- a/lib/mpi/mpi-bit.c
+++ b/lib/mpi/mpi-bit.c
@@ -1,4 +1,4 @@
-/* mpi-bit.c - MPI bit level fucntions
+/* mpi-bit.c - MPI bit level functions
* Copyright (C) 1998, 1999 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
diff --git a/lib/nlattr.c b/lib/nlattr.c
index bc5b5cf608c4..5b6116e81f9f 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -96,8 +96,8 @@ static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
continue;
if (nla_len(entry) < NLA_HDRLEN) {
- NL_SET_ERR_MSG_ATTR(extack, entry,
- "Array element too short");
+ NL_SET_ERR_MSG_ATTR_POL(extack, entry, policy,
+ "Array element too short");
return -ERANGE;
}
@@ -124,6 +124,7 @@ void nla_get_range_unsigned(const struct nla_policy *pt,
range->max = U8_MAX;
break;
case NLA_U16:
+ case NLA_BINARY:
range->max = U16_MAX;
break;
case NLA_U32:
@@ -140,6 +141,7 @@ void nla_get_range_unsigned(const struct nla_policy *pt,
switch (pt->validation_type) {
case NLA_VALIDATE_RANGE:
+ case NLA_VALIDATE_RANGE_WARN_TOO_LONG:
range->min = pt->min;
range->max = pt->max;
break;
@@ -157,9 +159,10 @@ void nla_get_range_unsigned(const struct nla_policy *pt,
}
}
-static int nla_validate_int_range_unsigned(const struct nla_policy *pt,
- const struct nlattr *nla,
- struct netlink_ext_ack *extack)
+static int nla_validate_range_unsigned(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack,
+ unsigned int validate)
{
struct netlink_range_validation range;
u64 value;
@@ -178,15 +181,39 @@ static int nla_validate_int_range_unsigned(const struct nla_policy *pt,
case NLA_MSECS:
value = nla_get_u64(nla);
break;
+ case NLA_BINARY:
+ value = nla_len(nla);
+ break;
default:
return -EINVAL;
}
nla_get_range_unsigned(pt, &range);
+ if (pt->validation_type == NLA_VALIDATE_RANGE_WARN_TOO_LONG &&
+ pt->type == NLA_BINARY && value > range.max) {
+ pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
+ current->comm, pt->type);
+ if (validate & NL_VALIDATE_STRICT_ATTRS) {
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "invalid attribute length");
+ return -EINVAL;
+ }
+
+ /* this assumes min <= max (don't validate against min) */
+ return 0;
+ }
+
if (value < range.min || value > range.max) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "integer out of range");
+ bool binary = pt->type == NLA_BINARY;
+
+ if (binary)
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "binary attribute size out of range");
+ else
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "integer out of range");
+
return -ERANGE;
}
@@ -264,8 +291,8 @@ static int nla_validate_int_range_signed(const struct nla_policy *pt,
nla_get_range_signed(pt, &range);
if (value < range.min || value > range.max) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "integer out of range");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "integer out of range");
return -ERANGE;
}
@@ -274,7 +301,8 @@ static int nla_validate_int_range_signed(const struct nla_policy *pt,
static int nla_validate_int_range(const struct nla_policy *pt,
const struct nlattr *nla,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack,
+ unsigned int validate)
{
switch (pt->type) {
case NLA_U8:
@@ -282,7 +310,8 @@ static int nla_validate_int_range(const struct nla_policy *pt,
case NLA_U32:
case NLA_U64:
case NLA_MSECS:
- return nla_validate_int_range_unsigned(pt, nla, extack);
+ case NLA_BINARY:
+ return nla_validate_range_unsigned(pt, nla, extack, validate);
case NLA_S8:
case NLA_S16:
case NLA_S32:
@@ -294,6 +323,37 @@ static int nla_validate_int_range(const struct nla_policy *pt,
}
}
+static int nla_validate_mask(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ u64 value;
+
+ switch (pt->type) {
+ case NLA_U8:
+ value = nla_get_u8(nla);
+ break;
+ case NLA_U16:
+ value = nla_get_u16(nla);
+ break;
+ case NLA_U32:
+ value = nla_get_u32(nla);
+ break;
+ case NLA_U64:
+ value = nla_get_u64(nla);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (value & ~(u64)pt->mask) {
+ NL_SET_ERR_MSG_ATTR(extack, nla, "reserved bit set");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
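A hedged sketch of how a policy entry might opt into the new mask check; NLA_POLICY_MASK() is assumed to be the helper that sets .validation_type = NLA_VALIDATE_MASK together with .mask, and the attribute names and mask value below are made up for illustration:

	/* Hypothetical policy: only the low three flag bits of the u32 are defined. */
	static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
		[EXAMPLE_ATTR_FLAGS] = NLA_POLICY_MASK(NLA_U32, 0x7),
	};

Any attribute carrying a bit outside 0x7 would then be rejected with the "reserved bit set" message above.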
static int validate_nla(const struct nlattr *nla, int maxtype,
const struct nla_policy *policy, unsigned int validate,
struct netlink_ext_ack *extack, unsigned int depth)
@@ -313,15 +373,12 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
BUG_ON(pt->type > NLA_TYPE_MAX);
- if ((nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) ||
- (pt->type == NLA_EXACT_LEN &&
- pt->validation_type == NLA_VALIDATE_WARN_TOO_LONG &&
- attrlen != pt->len)) {
+ if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) {
pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
current->comm, type);
if (validate & NL_VALIDATE_STRICT_ATTRS) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "invalid attribute length");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "invalid attribute length");
return -EINVAL;
}
}
@@ -329,14 +386,14 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
if (validate & NL_VALIDATE_NESTED) {
if ((pt->type == NLA_NESTED || pt->type == NLA_NESTED_ARRAY) &&
!(nla->nla_type & NLA_F_NESTED)) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "NLA_F_NESTED is missing");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "NLA_F_NESTED is missing");
return -EINVAL;
}
if (pt->type != NLA_NESTED && pt->type != NLA_NESTED_ARRAY &&
pt->type != NLA_UNSPEC && (nla->nla_type & NLA_F_NESTED)) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "NLA_F_NESTED not expected");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "NLA_F_NESTED not expected");
return -EINVAL;
}
}
@@ -375,7 +432,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
err = -EINVAL;
goto out_err;
}
- /* fall through */
+ fallthrough;
case NLA_STRING:
if (attrlen < 1)
@@ -449,19 +506,10 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
"Unsupported attribute");
return -EINVAL;
}
- /* fall through */
- case NLA_MIN_LEN:
if (attrlen < pt->len)
goto out_err;
break;
- case NLA_EXACT_LEN:
- if (pt->validation_type != NLA_VALIDATE_WARN_TOO_LONG) {
- if (attrlen != pt->len)
- goto out_err;
- break;
- }
- /* fall through */
default:
if (pt->len)
minlen = pt->len;
@@ -479,9 +527,15 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
break;
case NLA_VALIDATE_RANGE_PTR:
case NLA_VALIDATE_RANGE:
+ case NLA_VALIDATE_RANGE_WARN_TOO_LONG:
case NLA_VALIDATE_MIN:
case NLA_VALIDATE_MAX:
- err = nla_validate_int_range(pt, nla, extack);
+ err = nla_validate_int_range(pt, nla, extack, validate);
+ if (err)
+ return err;
+ break;
+ case NLA_VALIDATE_MASK:
+ err = nla_validate_mask(pt, nla, extack);
if (err)
return err;
break;
@@ -496,7 +550,8 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
return 0;
out_err:
- NL_SET_ERR_MSG_ATTR(extack, nla, "Attribute failed policy validation");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "Attribute failed policy validation");
return err;
}
@@ -654,35 +709,47 @@ struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
EXPORT_SYMBOL(nla_find);
/**
- * nla_strlcpy - Copy string attribute payload into a sized buffer
- * @dst: where to copy the string to
- * @nla: attribute to copy the string from
- * @dstsize: size of destination buffer
+ * nla_strscpy - Copy string attribute payload into a sized buffer
+ * @dst: Where to copy the string to.
+ * @nla: Attribute to copy the string from.
+ * @dstsize: Size of destination buffer.
*
* Copies at most dstsize - 1 bytes into the destination buffer.
- * The result is always a valid NUL-terminated string. Unlike
- * strlcpy the destination buffer is always padded out.
+ * Unlike strlcpy the destination buffer is always padded out.
*
- * Returns the length of the source buffer.
+ * Return:
+ * * srclen - Returns @nla length (not including the trailing %NUL).
+ * * -E2BIG - If @dstsize is 0 or greater than U16_MAX or @nla length greater
+ * than @dstsize.
*/
-size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
+ssize_t nla_strscpy(char *dst, const struct nlattr *nla, size_t dstsize)
{
size_t srclen = nla_len(nla);
char *src = nla_data(nla);
+ ssize_t ret;
+ size_t len;
+
+ if (dstsize == 0 || WARN_ON_ONCE(dstsize > U16_MAX))
+ return -E2BIG;
if (srclen > 0 && src[srclen - 1] == '\0')
srclen--;
- if (dstsize > 0) {
- size_t len = (srclen >= dstsize) ? dstsize - 1 : srclen;
-
- memset(dst, 0, dstsize);
- memcpy(dst, src, len);
+ if (srclen >= dstsize) {
+ len = dstsize - 1;
+ ret = -E2BIG;
+ } else {
+ len = srclen;
+ ret = len;
}
- return srclen;
+ memcpy(dst, src, len);
+ /* Zero pad end of dst. */
+ memset(dst + len, 0, dstsize - len);
+
+ return ret;
}
-EXPORT_SYMBOL(nla_strlcpy);
+EXPORT_SYMBOL(nla_strscpy);
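A short sketch of the new calling convention; unlike the old nla_strlcpy(), callers can now detect truncation directly from the return value (the attr variable below is assumed to come from an already parsed message):

	char name[IFNAMSIZ];
	ssize_t ret;

	ret = nla_strscpy(name, attr, sizeof(name));
	if (ret == -E2BIG)
		return ret;	/* source did not fit; name is still NUL-terminated and padded */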
/**
* nla_strdup - Copy string attribute payload into a newly allocated buffer
@@ -816,8 +883,7 @@ EXPORT_SYMBOL(__nla_reserve);
struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
int attrlen, int padattr)
{
- if (nla_need_padding_for_64bit(skb))
- nla_align_64bit(skb, padattr);
+ nla_align_64bit(skb, padattr);
return __nla_reserve(skb, attrtype, attrlen);
}
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 15ca78e1c7d4..8abe1870dba4 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -85,12 +85,16 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
put_cpu();
}
+// Dump stacks even for idle CPUs.
+static bool backtrace_idle;
+module_param(backtrace_idle, bool, 0644);
+
bool nmi_cpu_backtrace(struct pt_regs *regs)
{
int cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
- if (regs && cpu_in_idle(instruction_pointer(regs))) {
+ if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
cpu, (void *)instruction_pointer(regs));
} else {
diff --git a/lib/parman.c b/lib/parman.c
index c6e42a8db824..a11f2f667639 100644
--- a/lib/parman.c
+++ b/lib/parman.c
@@ -85,7 +85,6 @@ static int parman_shrink(struct parman *parman)
}
static bool parman_prio_used(struct parman_prio *prio)
-
{
return !list_empty(&prio->item_list);
}
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index e59eda07305e..a1071cdefb5a 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include <linux/percpu-refcount.h>
/*
@@ -168,6 +169,7 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
struct percpu_ref_data, rcu);
struct percpu_ref *ref = data->ref;
unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+ static atomic_t underflows;
unsigned long count = 0;
int cpu;
@@ -191,9 +193,13 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
*/
atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);
- WARN_ONCE(atomic_long_read(&data->count) <= 0,
- "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
- data->release, atomic_long_read(&data->count));
+ if (WARN_ONCE(atomic_long_read(&data->count) <= 0,
+ "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
+ data->release, atomic_long_read(&data->count)) &&
+ atomic_inc_return(&underflows) < 4) {
+ pr_err("%s(): percpu_ref underflow", __func__);
+ mem_dump_obj(data);
+ }
/* @ref is viewed as dead on all CPUs, send out switch confirmation */
percpu_ref_call_confirm_rcu(rcu);
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index f61689a96e85..00f666d94486 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -85,7 +85,7 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
preempt_disable();
count = __this_cpu_read(*fbc->counters) + amount;
- if (count >= batch || count <= -batch) {
+ if (abs(count) >= batch) {
unsigned long flags;
raw_spin_lock_irqsave(&fbc->lock, flags);
fbc->count += count;
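Behaviour is unchanged by this rewrite; for example, with batch = 32 and a per-CPU delta of -40, the old test (-40 <= -32) and the new test (abs(-40) = 40 >= 32) both take the flush path. The abs() form simply expresses the symmetric threshold as one condition.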
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8e4a3a4397f2..3a4da11b804d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>
-#include <linux/local_lock.h>
#include <linux/preempt.h> /* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
@@ -325,7 +324,7 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
int ret = -ENOMEM;
/*
- * Nodes preloaded by one cgroup can be be used by another cgroup, so
+ * Nodes preloaded by one cgroup can be used by another cgroup, so
* they should never be accounted to any particular memory cgroup.
*/
gfp_mask &= ~__GFP_ACCOUNT;
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index b4c0df6d706d..c770570bfe4f 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -48,7 +48,7 @@ endif
endif
quiet_cmd_unroll = UNROLL $@
- cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$* < $< > $@
+ cmd_unroll = $(AWK) -v N=$* -f $(srctree)/$(src)/unroll.awk < $< > $@
targets += int1.c int2.c int4.c int8.c int16.c int32.c
$(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
diff --git a/lib/random32.c b/lib/random32.c
index dfb9981ab798..4d0e05e471d7 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -38,19 +38,10 @@
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/sched.h>
+#include <linux/bitops.h>
#include <asm/unaligned.h>
#include <trace/events/random.h>
-#ifdef CONFIG_RANDOM32_SELFTEST
-static void __init prandom_state_selftest(void);
-#else
-static inline void prandom_state_selftest(void)
-{
-}
-#endif
-
-DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
-
/**
* prandom_u32_state - seeded pseudo-random number generator.
* @state: pointer to state structure holding seeded state.
@@ -71,26 +62,6 @@ u32 prandom_u32_state(struct rnd_state *state)
EXPORT_SYMBOL(prandom_u32_state);
/**
- * prandom_u32 - pseudo random number generator
- *
- * A 32 bit pseudo-random number is generated using a fast
- * algorithm suitable for simulation. This algorithm is NOT
- * considered safe for cryptographic use.
- */
-u32 prandom_u32(void)
-{
- struct rnd_state *state = &get_cpu_var(net_rand_state);
- u32 res;
-
- res = prandom_u32_state(state);
- trace_prandom_u32(res);
- put_cpu_var(net_rand_state);
-
- return res;
-}
-EXPORT_SYMBOL(prandom_u32);
-
-/**
* prandom_bytes_state - get the requested number of pseudo-random bytes
*
* @state: pointer to state structure holding seeded state.
@@ -121,20 +92,6 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
}
EXPORT_SYMBOL(prandom_bytes_state);
-/**
- * prandom_bytes - get the requested number of pseudo-random bytes
- * @buf: where to copy the pseudo-random bytes to
- * @bytes: the requested number of bytes
- */
-void prandom_bytes(void *buf, size_t bytes)
-{
- struct rnd_state *state = &get_cpu_var(net_rand_state);
-
- prandom_bytes_state(state, buf, bytes);
- put_cpu_var(net_rand_state);
-}
-EXPORT_SYMBOL(prandom_bytes);
-
static void prandom_warmup(struct rnd_state *state)
{
/* Calling RNG ten times to satisfy recurrence condition */
@@ -150,96 +107,6 @@ static void prandom_warmup(struct rnd_state *state)
prandom_u32_state(state);
}
-static u32 __extract_hwseed(void)
-{
- unsigned int val = 0;
-
- (void)(arch_get_random_seed_int(&val) ||
- arch_get_random_int(&val));
-
- return val;
-}
-
-static void prandom_seed_early(struct rnd_state *state, u32 seed,
- bool mix_with_hwseed)
-{
-#define LCG(x) ((x) * 69069U) /* super-duper LCG */
-#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
- state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
- state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
- state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
- state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
-}
-
-/**
- * prandom_seed - add entropy to pseudo random number generator
- * @entropy: entropy value
- *
- * Add some additional entropy to the prandom pool.
- */
-void prandom_seed(u32 entropy)
-{
- int i;
- /*
- * No locking on the CPUs, but then somewhat random results are, well,
- * expected.
- */
- for_each_possible_cpu(i) {
- struct rnd_state *state = &per_cpu(net_rand_state, i);
-
- state->s1 = __seed(state->s1 ^ entropy, 2U);
- prandom_warmup(state);
- }
-}
-EXPORT_SYMBOL(prandom_seed);
-
-/*
- * Generate some initially weak seeding values to allow
- * to start the prandom_u32() engine.
- */
-static int __init prandom_init(void)
-{
- int i;
-
- prandom_state_selftest();
-
- for_each_possible_cpu(i) {
- struct rnd_state *state = &per_cpu(net_rand_state, i);
- u32 weak_seed = (i + jiffies) ^ random_get_entropy();
-
- prandom_seed_early(state, weak_seed, true);
- prandom_warmup(state);
- }
-
- return 0;
-}
-core_initcall(prandom_init);
-
-static void __prandom_timer(struct timer_list *unused);
-
-static DEFINE_TIMER(seed_timer, __prandom_timer);
-
-static void __prandom_timer(struct timer_list *unused)
-{
- u32 entropy;
- unsigned long expires;
-
- get_random_bytes(&entropy, sizeof(entropy));
- prandom_seed(entropy);
-
- /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
- expires = 40 + prandom_u32_max(40);
- seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
-
- add_timer(&seed_timer);
-}
-
-static void __init __prandom_start_seed_timer(void)
-{
- seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
- add_timer(&seed_timer);
-}
-
void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
{
int i;
@@ -259,51 +126,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
}
EXPORT_SYMBOL(prandom_seed_full_state);
-/*
- * Generate better values after random number generator
- * is fully initialized.
- */
-static void __prandom_reseed(bool late)
-{
- unsigned long flags;
- static bool latch = false;
- static DEFINE_SPINLOCK(lock);
-
- /* Asking for random bytes might result in bytes getting
- * moved into the nonblocking pool and thus marking it
- * as initialized. In this case we would double back into
- * this function and attempt to do a late reseed.
- * Ignore the pointless attempt to reseed again if we're
- * already waiting for bytes when the nonblocking pool
- * got initialized.
- */
-
- /* only allow initial seeding (late == false) once */
- if (!spin_trylock_irqsave(&lock, flags))
- return;
-
- if (latch && !late)
- goto out;
-
- latch = true;
- prandom_seed_full_state(&net_rand_state);
-out:
- spin_unlock_irqrestore(&lock, flags);
-}
-
-void prandom_reseed_late(void)
-{
- __prandom_reseed(true);
-}
-
-static int __init prandom_reseed(void)
-{
- __prandom_reseed(false);
- __prandom_start_seed_timer();
- return 0;
-}
-late_initcall(prandom_reseed);
-
#ifdef CONFIG_RANDOM32_SELFTEST
static struct prandom_test1 {
u32 seed;
@@ -423,7 +245,28 @@ static struct prandom_test2 {
{ 407983964U, 921U, 728767059U },
};
-static void __init prandom_state_selftest(void)
+static u32 __extract_hwseed(void)
+{
+ unsigned int val = 0;
+
+ (void)(arch_get_random_seed_int(&val) ||
+ arch_get_random_int(&val));
+
+ return val;
+}
+
+static void prandom_seed_early(struct rnd_state *state, u32 seed,
+ bool mix_with_hwseed)
+{
+#define LCG(x) ((x) * 69069U) /* super-duper LCG */
+#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
+ state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
+ state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
+ state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
+ state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
+}
+
+static int __init prandom_state_selftest(void)
{
int i, j, errors = 0, runs = 0;
bool error = false;
@@ -463,5 +306,327 @@ static void __init prandom_state_selftest(void)
pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
else
pr_info("prandom: %d self tests passed\n", runs);
+ return 0;
}
+core_initcall(prandom_state_selftest);
#endif
+
+/*
+ * The prandom_u32() implementation is now completely separate from the
+ * prandom_state() functions, which are retained (for now) for compatibility.
+ *
+ * Because of (ab)use in the networking code for choosing random TCP/UDP port
+ * numbers, which open DoS possibilities if guessable, we want something
+ * stronger than a standard PRNG. But the performance requirements of
+ * the network code do not allow robust crypto for this application.
+ *
+ * So this is a homebrew Junior Spaceman implementation, based on the
+ * lowest-latency trustworthy crypto primitive available, SipHash.
+ * (The authors of SipHash have not been consulted about this abuse of
+ * their work.)
+ *
+ * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to
+ * one word of output. This abbreviated version uses 2 rounds per word
+ * of output.
+ */
+
+struct siprand_state {
+ unsigned long v0;
+ unsigned long v1;
+ unsigned long v2;
+ unsigned long v3;
+};
+
+static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy;
+DEFINE_PER_CPU(unsigned long, net_rand_noise);
+EXPORT_PER_CPU_SYMBOL(net_rand_noise);
+
+/*
+ * This is the core CPRNG function. As "pseudorandom", this is not used
+ * for truly valuable things, just intended to be a PITA to guess.
+ * For maximum speed, we do just two SipHash rounds per word. This is
+ * the same rate as 4 rounds per 64 bits that SipHash normally uses,
+ * so hopefully it's reasonably secure.
+ *
+ * There are two changes from the official SipHash finalization:
+ * - We omit some constants XORed with v2 in the SipHash spec as irrelevant;
+ * they are there only to make the output rounds distinct from the input
+ * rounds, and this application has no input rounds.
+ * - Rather than returning v0^v1^v2^v3, return v1+v3.
+ * If you look at the SipHash round, the last operation on v3 is
+ * "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time.
+ * Likewise "v1 ^= v2". (The rotate of v2 makes a difference, but
+ * it still cancels out half of the bits in v2 for no benefit.)
+ * Second, since the last combining operation was xor, continue the
+ * pattern of alternating xor/add for a tiny bit of extra non-linearity.
+ */
+static inline u32 siprand_u32(struct siprand_state *s)
+{
+ unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;
+ unsigned long n = raw_cpu_read(net_rand_noise);
+
+ v3 ^= n;
+ PRND_SIPROUND(v0, v1, v2, v3);
+ PRND_SIPROUND(v0, v1, v2, v3);
+ v0 ^= n;
+ s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3;
+ return v1 + v3;
+}
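PRND_SIPROUND() is defined elsewhere (likely in the prandom header); it is assumed to be the standard SipHash ARX round. For reference only, the 64-bit form of that round looks roughly like:

	#define SIPROUND_SKETCH(v0, v1, v2, v3) do {				\
		v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32);	\
		v2 += v3; v3 = rol64(v3, 16); v3 ^= v2;				\
		v0 += v3; v3 = rol64(v3, 21); v3 ^= v0;				\
		v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32);	\
	} while (0)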
+
+
+/**
+ * prandom_u32 - pseudo random number generator
+ *
+ * A 32 bit pseudo-random number is generated using a fast
+ * algorithm suitable for simulation. This algorithm is NOT
+ * considered safe for cryptographic use.
+ */
+u32 prandom_u32(void)
+{
+ struct siprand_state *state = get_cpu_ptr(&net_rand_state);
+ u32 res = siprand_u32(state);
+
+ trace_prandom_u32(res);
+ put_cpu_ptr(&net_rand_state);
+ return res;
+}
+EXPORT_SYMBOL(prandom_u32);
+
+/**
+ * prandom_bytes - get the requested number of pseudo-random bytes
+ * @buf: where to copy the pseudo-random bytes to
+ * @bytes: the requested number of bytes
+ */
+void prandom_bytes(void *buf, size_t bytes)
+{
+ struct siprand_state *state = get_cpu_ptr(&net_rand_state);
+ u8 *ptr = buf;
+
+ while (bytes >= sizeof(u32)) {
+ put_unaligned(siprand_u32(state), (u32 *)ptr);
+ ptr += sizeof(u32);
+ bytes -= sizeof(u32);
+ }
+
+ if (bytes > 0) {
+ u32 rem = siprand_u32(state);
+
+ do {
+ *ptr++ = (u8)rem;
+ rem >>= BITS_PER_BYTE;
+ } while (--bytes > 0);
+ }
+ put_cpu_ptr(&net_rand_state);
+}
+EXPORT_SYMBOL(prandom_bytes);
+
+/**
+ * prandom_seed - add entropy to pseudo random number generator
+ * @entropy: entropy value
+ *
+ * Add some additional seed material to the prandom pool.
+ * The "entropy" is actually our IP address (the only caller is
+ * the network code), not for unpredictability, but to ensure that
+ * different machines are initialized differently.
+ */
+void prandom_seed(u32 entropy)
+{
+ int i;
+
+ add_device_randomness(&entropy, sizeof(entropy));
+
+ for_each_possible_cpu(i) {
+ struct siprand_state *state = per_cpu_ptr(&net_rand_state, i);
+ unsigned long v0 = state->v0, v1 = state->v1;
+ unsigned long v2 = state->v2, v3 = state->v3;
+
+ do {
+ v3 ^= entropy;
+ PRND_SIPROUND(v0, v1, v2, v3);
+ PRND_SIPROUND(v0, v1, v2, v3);
+ v0 ^= entropy;
+ } while (unlikely(!v0 || !v1 || !v2 || !v3));
+
+ WRITE_ONCE(state->v0, v0);
+ WRITE_ONCE(state->v1, v1);
+ WRITE_ONCE(state->v2, v2);
+ WRITE_ONCE(state->v3, v3);
+ }
+}
+EXPORT_SYMBOL(prandom_seed);
+
+/*
+ * Generate some initially weak seeding values to allow
+ * the prandom_u32() engine to be started.
+ */
+static int __init prandom_init_early(void)
+{
+ int i;
+ unsigned long v0, v1, v2, v3;
+
+ if (!arch_get_random_long(&v0))
+ v0 = jiffies;
+ if (!arch_get_random_long(&v1))
+ v1 = random_get_entropy();
+ v2 = v0 ^ PRND_K0;
+ v3 = v1 ^ PRND_K1;
+
+ for_each_possible_cpu(i) {
+ struct siprand_state *state;
+
+ v3 ^= i;
+ PRND_SIPROUND(v0, v1, v2, v3);
+ PRND_SIPROUND(v0, v1, v2, v3);
+ v0 ^= i;
+
+ state = per_cpu_ptr(&net_rand_state, i);
+ state->v0 = v0; state->v1 = v1;
+ state->v2 = v2; state->v3 = v3;
+ }
+
+ return 0;
+}
+core_initcall(prandom_init_early);
+
+
+/* Stronger reseeding when available, and periodically thereafter. */
+static void prandom_reseed(struct timer_list *unused);
+
+static DEFINE_TIMER(seed_timer, prandom_reseed);
+
+static void prandom_reseed(struct timer_list *unused)
+{
+ unsigned long expires;
+ int i;
+
+ /*
+ * Reinitialize each CPU's PRNG with 128 bits of key.
+ * No locking on the CPUs, but then somewhat random results are,
+ * well, expected.
+ */
+ for_each_possible_cpu(i) {
+ struct siprand_state *state;
+ unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0;
+ unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1;
+#if BITS_PER_LONG == 32
+ int j;
+
+ /*
+ * On 32-bit machines, hash in two extra words to
+ * approximate 128-bit key length. Not that the hash
+ * has that much security, but this prevents a trivial
+ * 64-bit brute force.
+ */
+ for (j = 0; j < 2; j++) {
+ unsigned long m = get_random_long();
+
+ v3 ^= m;
+ PRND_SIPROUND(v0, v1, v2, v3);
+ PRND_SIPROUND(v0, v1, v2, v3);
+ v0 ^= m;
+ }
+#endif
+ /*
+ * Probably impossible in practice, but there is a
+ * theoretical risk that a race between this reseeding
+ * and the target CPU writing its state back could
+ * create the all-zero SipHash fixed point.
+ *
+ * To ensure that never happens, ensure the state
+ * we write contains no zero words.
+ */
+ state = per_cpu_ptr(&net_rand_state, i);
+ WRITE_ONCE(state->v0, v0 ? v0 : -1ul);
+ WRITE_ONCE(state->v1, v1 ? v1 : -1ul);
+ WRITE_ONCE(state->v2, v2 ? v2 : -1ul);
+ WRITE_ONCE(state->v3, v3 ? v3 : -1ul);
+ }
+
+ /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
+ expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ));
+ mod_timer(&seed_timer, expires);
+}
+
+/*
+ * The random ready callback can be called from almost any interrupt.
+ * To avoid worrying about whether it's safe to delay that interrupt
+ * long enough to seed all CPUs, just schedule an immediate timer event.
+ */
+static void prandom_timer_start(struct random_ready_callback *unused)
+{
+ mod_timer(&seed_timer, jiffies);
+}
+
+#ifdef CONFIG_RANDOM32_SELFTEST
+/* Principle: True 32-bit random numbers will all have 16 differing bits on
+ * average. For each 32-bit number, there are 601M numbers differing by 16
+ * bits, and 89% of the numbers differ by at least 12 bits. Note that more
+ * than 16 differing bits also implies a correlation with inverted bits. Thus
+ * we take 1024 random numbers and compare each of them to the other ones,
+ * counting the deviation of correlated bits to 16. Constants report 32,
+ * counters 32-log2(TEST_SIZE), and pure randoms, around 6 or lower. With the
+ * u32 total, TEST_SIZE may be as large as 4096 samples.
+ * (See the back-of-the-envelope check of the threshold below.)
+ */
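Back-of-the-envelope check of that threshold (an approximate argument, not part of the test): for truly random 32-bit words the XOR of two samples has Binomial(32, 1/2) set bits, so (bits - 16)^2 averages the variance 32 * 1/4 = 8. The accumulated total divided by samples * (samples - 1) should therefore sit near 8, and int_sqrt(8 * 4) = 5, comfortably under the failure threshold of 6 used further down.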
+#define TEST_SIZE 1024
+static int __init prandom32_state_selftest(void)
+{
+ unsigned int x, y, bits, samples;
+ u32 xor, flip;
+ u32 total;
+ u32 *data;
+
+ data = kmalloc(sizeof(*data) * TEST_SIZE, GFP_KERNEL);
+ if (!data)
+ return 0;
+
+ for (samples = 0; samples < TEST_SIZE; samples++)
+ data[samples] = prandom_u32();
+
+ flip = total = 0;
+ for (x = 0; x < samples; x++) {
+ for (y = 0; y < samples; y++) {
+ if (x == y)
+ continue;
+ xor = data[x] ^ data[y];
+ flip |= xor;
+ bits = hweight32(xor);
+ total += (bits - 16) * (bits - 16);
+ }
+ }
+
+ /* We'll return the average deviation as 2*sqrt(corr/samples), i.e.
+ * sqrt(4*corr/samples), which provides better resolution.
+ */
+ bits = int_sqrt(total / (samples * (samples - 1)) * 4);
+ if (bits > 6)
+ pr_warn("prandom32: self test failed (at least %u bits"
+ " correlated), fixed_mask=%#x fixed_value=%#x\n",
+ bits, ~flip, data[0] & ~flip);
+ else
+ pr_info("prandom32: self test passed (less than %u bits"
+ " correlated)\n",
+ bits+1);
+ kfree(data);
+ return 0;
+}
+core_initcall(prandom32_state_selftest);
+#endif /* CONFIG_RANDOM32_SELFTEST */
+
+/*
+ * Start periodic full reseeding as soon as strong
+ * random numbers are available.
+ */
+static int __init prandom_init_late(void)
+{
+ static struct random_ready_callback random_ready = {
+ .func = prandom_timer_start
+ };
+ int ret = add_random_ready_callback(&random_ready);
+
+ if (ret == -EALREADY) {
+ prandom_timer_start(&random_ready);
+ ret = 0;
+ }
+ return ret;
+}
+late_initcall(prandom_init_late);
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 267aa7709416..d693d9213ceb 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -12,33 +12,24 @@
/*
* See if we have deferred clears that we can batch move
*/
-static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
+static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
- unsigned long mask, val;
- bool ret = false;
- unsigned long flags;
+ unsigned long mask;
- spin_lock_irqsave(&sb->map[index].swap_lock, flags);
-
- if (!sb->map[index].cleared)
- goto out_unlock;
+ if (!READ_ONCE(map->cleared))
+ return false;
/*
* First get a stable cleared mask, setting the old mask to 0.
*/
- mask = xchg(&sb->map[index].cleared, 0);
+ mask = xchg(&map->cleared, 0);
/*
* Now clear the masked bits in our free word
*/
- do {
- val = sb->map[index].word;
- } while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);
-
- ret = true;
-out_unlock:
- spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
- return ret;
+ atomic_long_andnot(mask, (atomic_long_t *)&map->word);
+ BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
+ return true;
}
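For reference, atomic_long_andnot(mask, v) atomically performs *v &= ~mask, which is why the open-coded cmpxchg() retry loop could be dropped: with cleared == 0b0110, for instance, the two marked bits are removed from the word in a single atomic RMW. The BUILD_BUG_ON() only asserts that casting the plain unsigned long word to atomic_long_t is size-safe.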
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
@@ -80,7 +71,6 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
for (i = 0; i < sb->map_nr; i++) {
sb->map[i].depth = min(depth, bits_per_word);
depth -= sb->map[i].depth;
- spin_lock_init(&sb->map[i].swap_lock);
}
return 0;
}
@@ -92,7 +82,7 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
unsigned int i;
for (i = 0; i < sb->map_nr; i++)
- sbitmap_deferred_clear(sb, i);
+ sbitmap_deferred_clear(&sb->map[i]);
sb->depth = depth;
sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
@@ -107,9 +97,11 @@ EXPORT_SYMBOL_GPL(sbitmap_resize);
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
unsigned int hint, bool wrap)
{
- unsigned int orig_hint = hint;
int nr;
+ /* don't wrap if starting from 0 */
+ wrap = wrap && hint;
+
while (1) {
nr = find_next_zero_bit(word, depth, hint);
if (unlikely(nr >= depth)) {
@@ -118,8 +110,8 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
* offset to 0 in a failure case, so start from 0 to
* exhaust the map.
*/
- if (orig_hint && hint && wrap) {
- hint = orig_hint = 0;
+ if (hint && wrap) {
+ hint = 0;
continue;
}
return -1;
@@ -139,15 +131,15 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
unsigned int alloc_hint, bool round_robin)
{
+ struct sbitmap_word *map = &sb->map[index];
int nr;
do {
- nr = __sbitmap_get_word(&sb->map[index].word,
- sb->map[index].depth, alloc_hint,
+ nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint,
!round_robin);
if (nr != -1)
break;
- if (!sbitmap_deferred_clear(sb, index))
+ if (!sbitmap_deferred_clear(map))
break;
} while (1);
@@ -207,7 +199,7 @@ again:
break;
}
- if (sbitmap_deferred_clear(sb, index))
+ if (sbitmap_deferred_clear(&sb->map[index]))
goto again;
/* Jump to next index. */
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 5d63a8857f36..a59778946404 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -365,6 +365,37 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
}
EXPORT_SYMBOL(sg_alloc_table);
+static struct scatterlist *get_next_sg(struct sg_table *table,
+ struct scatterlist *cur,
+ unsigned long needed_sges,
+ gfp_t gfp_mask)
+{
+ struct scatterlist *new_sg, *next_sg;
+ unsigned int alloc_size;
+
+ if (cur) {
+ next_sg = sg_next(cur);
+ /* Check if the last entry should be kept for chaining */
+ if (!sg_is_last(next_sg) || needed_sges == 1)
+ return next_sg;
+ }
+
+ alloc_size = min_t(unsigned long, needed_sges, SG_MAX_SINGLE_ALLOC);
+ new_sg = sg_kmalloc(alloc_size, gfp_mask);
+ if (!new_sg)
+ return ERR_PTR(-ENOMEM);
+ sg_init_table(new_sg, alloc_size);
+ if (cur) {
+ __sg_chain(next_sg, new_sg);
+ table->orig_nents += alloc_size - 1;
+ } else {
+ table->sgl = new_sg;
+ table->orig_nents = alloc_size;
+ table->nents = 0;
+ }
+ return new_sg;
+}
+
/**
* __sg_alloc_table_from_pages - Allocate and initialize an sg table from
* an array of pages
@@ -373,30 +404,69 @@ EXPORT_SYMBOL(sg_alloc_table);
* @n_pages: Number of pages in the pages array
* @offset: Offset from start of the first page to the start of a buffer
* @size: Number of valid bytes in the buffer (after offset)
- * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
+ * @max_segment: Maximum size of a scatterlist element in bytes
+ * @prv: Last populated sge in sgt
+ * @left_pages: Number of pages the caller still has to append after this call
* @gfp_mask: GFP allocation mask
*
- * Description:
- * Allocate and initialize an sg table from a list of pages. Contiguous
- * ranges of the pages are squashed into a single scatterlist node up to the
- * maximum size specified in @max_segment. An user may provide an offset at a
- * start and a size of valid data in a buffer specified by the page array.
- * The returned sg table is released by sg_free_table.
+ * Description:
+ * If @prv is NULL, allocate and initialize an sg table from a list of pages,
+ * else reuse the scatterlist passed in at @prv.
+ * Contiguous ranges of the pages are squashed into a single scatterlist
+ * entry up to the maximum size specified in @max_segment. A user may
+ * provide an offset at a start and a size of valid data in a buffer
+ * specified by the page array.
*
* Returns:
- * 0 on success, negative error on failure
+ * The last SGE in sgt on success, an ERR_PTR() on failure.
+ * The allocation in @sgt must be released by sg_free_table.
+ *
+ * Notes:
+ * If this function returns an error, the caller must call
+ * sg_free_table() to clean up any leftover allocations.
*/
-int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
- unsigned int n_pages, unsigned int offset,
- unsigned long size, unsigned int max_segment,
- gfp_t gfp_mask)
+struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
+ struct page **pages, unsigned int n_pages, unsigned int offset,
+ unsigned long size, unsigned int max_segment,
+ struct scatterlist *prv, unsigned int left_pages,
+ gfp_t gfp_mask)
{
- unsigned int chunks, cur_page, seg_len, i;
- int ret;
- struct scatterlist *s;
+ unsigned int chunks, cur_page, seg_len, i, prv_len = 0;
+ unsigned int added_nents = 0;
+ struct scatterlist *s = prv;
- if (WARN_ON(!max_segment || offset_in_page(max_segment)))
- return -EINVAL;
+ /*
+ * The algorithm below requires max_segment to be aligned to PAGE_SIZE
+ * otherwise it can overshoot.
+ */
+ max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE);
+ if (WARN_ON(max_segment < PAGE_SIZE))
+ return ERR_PTR(-EINVAL);
+
+ if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && prv)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (prv) {
+ unsigned long paddr = (page_to_pfn(sg_page(prv)) * PAGE_SIZE +
+ prv->offset + prv->length) /
+ PAGE_SIZE;
+
+ if (WARN_ON(offset))
+ return ERR_PTR(-EINVAL);
+
+ /* Merge contiguous pages into the last SG */
+ prv_len = prv->length;
+ while (n_pages && page_to_pfn(pages[0]) == paddr) {
+ if (prv->length + PAGE_SIZE > max_segment)
+ break;
+ prv->length += PAGE_SIZE;
+ paddr++;
+ pages++;
+ n_pages--;
+ }
+ if (!n_pages)
+ goto out;
+ }
/* compute number of contiguous chunks */
chunks = 1;
@@ -410,13 +480,9 @@ int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
}
}
- ret = sg_alloc_table(sgt, chunks, gfp_mask);
- if (unlikely(ret))
- return ret;
-
/* merging chunks and putting them into the scatterlist */
cur_page = 0;
- for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+ for (i = 0; i < chunks; i++) {
unsigned int j, chunk_size;
/* look for the end of the current chunk */
@@ -429,15 +495,30 @@ int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
break;
}
+ /* Pass how many chunks might be left */
+ s = get_next_sg(sgt, s, chunks - i + left_pages, gfp_mask);
+ if (IS_ERR(s)) {
+ /*
+ * Adjust entry length to be as before function was
+ * called.
+ */
+ if (prv)
+ prv->length = prv_len;
+ return s;
+ }
chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
sg_set_page(s, pages[cur_page],
min_t(unsigned long, size, chunk_size), offset);
+ added_nents++;
size -= chunk_size;
offset = 0;
cur_page = j;
}
-
- return 0;
+ sgt->nents += added_nents;
+out:
+ if (!left_pages)
+ sg_mark_end(s);
+ return s;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);
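A hedged sketch of the incremental usage the new @prv/@left_pages parameters enable; the page arrays and counts below are hypothetical, and max_segment of UINT_MAX simply means "no extra segment limit":

	struct sg_table sgt = {};
	struct scatterlist *last;

	/* First batch: no previous SGE, a second batch of n_b pages still to come. */
	last = __sg_alloc_table_from_pages(&sgt, pages_a, n_a, 0,
					   (unsigned long)n_a << PAGE_SHIFT,
					   UINT_MAX, NULL, n_b, GFP_KERNEL);
	if (IS_ERR(last))
		return PTR_ERR(last);

	/* Second batch appends after 'last'; left_pages == 0 marks the end. */
	last = __sg_alloc_table_from_pages(&sgt, pages_b, n_b, 0,
					   (unsigned long)n_b << PAGE_SHIFT,
					   UINT_MAX, last, 0, GFP_KERNEL);
	if (IS_ERR(last)) {
		sg_free_table(&sgt);
		return PTR_ERR(last);
	}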
@@ -465,8 +546,8 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
unsigned int n_pages, unsigned int offset,
unsigned long size, gfp_t gfp_mask)
{
- return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
- SCATTERLIST_MAX_SEGMENT, gfp_mask);
+ return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages,
+ offset, size, UINT_MAX, NULL, 0, gfp_mask));
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
@@ -504,7 +585,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length,
nalloc++;
}
sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
- (gfp & ~GFP_DMA) | __GFP_ZERO);
+ gfp & ~GFP_DMA);
if (!sgl)
return NULL;
@@ -514,7 +595,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length,
elem_len = min_t(u64, length, PAGE_SIZE << order);
page = alloc_pages(gfp, order);
if (!page) {
- sgl_free(sgl);
+ sgl_free_order(sgl, order);
return NULL;
}
@@ -852,7 +933,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
sg_miter_start(&miter, sgl, nents, sg_flags);
if (!sg_miter_skip(&miter, skip))
- return false;
+ return 0;
while ((offset < buflen) && sg_miter_next(&miter)) {
unsigned int len;
diff --git a/lib/sha1.c b/lib/sha1.c
index 49257a915bb6..9bd1935a1472 100644
--- a/lib/sha1.c
+++ b/lib/sha1.c
@@ -9,7 +9,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/bitops.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
#include <asm/unaligned.h>
/*
diff --git a/lib/siphash.c b/lib/siphash.c
index c47bb6ff2149..a90112ee72a1 100644
--- a/lib/siphash.c
+++ b/lib/siphash.c
@@ -68,11 +68,11 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
bytemask_from_count(left)));
#else
switch (left) {
- case 7: b |= ((u64)end[6]) << 48; /* fall through */
- case 6: b |= ((u64)end[5]) << 40; /* fall through */
- case 5: b |= ((u64)end[4]) << 32; /* fall through */
+ case 7: b |= ((u64)end[6]) << 48; fallthrough;
+ case 6: b |= ((u64)end[5]) << 40; fallthrough;
+ case 5: b |= ((u64)end[4]) << 32; fallthrough;
case 4: b |= le32_to_cpup(data); break;
- case 3: b |= ((u64)end[2]) << 16; /* fall through */
+ case 3: b |= ((u64)end[2]) << 16; fallthrough;
case 2: b |= le16_to_cpup(data); break;
case 1: b |= end[0];
}
@@ -101,11 +101,11 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
bytemask_from_count(left)));
#else
switch (left) {
- case 7: b |= ((u64)end[6]) << 48; /* fall through */
- case 6: b |= ((u64)end[5]) << 40; /* fall through */
- case 5: b |= ((u64)end[4]) << 32; /* fall through */
+ case 7: b |= ((u64)end[6]) << 48; fallthrough;
+ case 6: b |= ((u64)end[5]) << 40; fallthrough;
+ case 5: b |= ((u64)end[4]) << 32; fallthrough;
case 4: b |= get_unaligned_le32(end); break;
- case 3: b |= ((u64)end[2]) << 16; /* fall through */
+ case 3: b |= ((u64)end[2]) << 16; fallthrough;
case 2: b |= get_unaligned_le16(end); break;
case 1: b |= end[0];
}
@@ -268,11 +268,11 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
bytemask_from_count(left)));
#else
switch (left) {
- case 7: b |= ((u64)end[6]) << 48; /* fall through */
- case 6: b |= ((u64)end[5]) << 40; /* fall through */
- case 5: b |= ((u64)end[4]) << 32; /* fall through */
+ case 7: b |= ((u64)end[6]) << 48; fallthrough;
+ case 6: b |= ((u64)end[5]) << 40; fallthrough;
+ case 5: b |= ((u64)end[4]) << 32; fallthrough;
case 4: b |= le32_to_cpup(data); break;
- case 3: b |= ((u64)end[2]) << 16; /* fall through */
+ case 3: b |= ((u64)end[2]) << 16; fallthrough;
case 2: b |= le16_to_cpup(data); break;
case 1: b |= end[0];
}
@@ -301,11 +301,11 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
bytemask_from_count(left)));
#else
switch (left) {
- case 7: b |= ((u64)end[6]) << 48; /* fall through */
- case 6: b |= ((u64)end[5]) << 40; /* fall through */
- case 5: b |= ((u64)end[4]) << 32; /* fall through */
+ case 7: b |= ((u64)end[6]) << 48; fallthrough;
+ case 6: b |= ((u64)end[5]) << 40; fallthrough;
+ case 5: b |= ((u64)end[4]) << 32; fallthrough;
case 4: b |= get_unaligned_le32(end); break;
- case 3: b |= ((u64)end[2]) << 16; /* fall through */
+ case 3: b |= ((u64)end[2]) << 16; fallthrough;
case 2: b |= get_unaligned_le16(end); break;
case 1: b |= end[0];
}
@@ -431,7 +431,7 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
v0 ^= m;
}
switch (left) {
- case 3: b |= ((u32)end[2]) << 16; /* fall through */
+ case 3: b |= ((u32)end[2]) << 16; fallthrough;
case 2: b |= le16_to_cpup(data); break;
case 1: b |= end[0];
}
@@ -454,7 +454,7 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
v0 ^= m;
}
switch (left) {
- case 3: b |= ((u32)end[2]) << 16; /* fall through */
+ case 3: b |= ((u32)end[2]) << 16; fallthrough;
case 2: b |= get_unaligned_le16(end); break;
case 1: b |= end[0];
}
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 525222e4f409..1c1dbd300325 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -26,6 +26,11 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
if (current->nr_cpus_allowed == 1)
goto out;
+#ifdef CONFIG_SMP
+ if (current->migration_disabled)
+ goto out;
+#endif
+
/*
* It is valid to assume CPU-locality during early bootup:
*/
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 2caffc64e4c8..890dcc2e984e 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -62,7 +62,7 @@ struct stack_record {
u32 hash; /* Hash in the hastable */
u32 size; /* Number of frames in the stack */
union handle_parts handle;
- unsigned long entries[1]; /* Variable-sized array of entries. */
+ unsigned long entries[]; /* Variable-sized array of entries. */
};
static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
@@ -104,9 +104,8 @@ static bool init_stack_slab(void **prealloc)
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
u32 hash, void **prealloc, gfp_t alloc_flags)
{
- int required_size = offsetof(struct stack_record, entries) +
- sizeof(unsigned long) * size;
struct stack_record *stack;
+ size_t required_size = struct_size(stack, entries, size);
required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);
@@ -136,7 +135,7 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
stack->handle.slabindex = depot_index;
stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
stack->handle.valid = 1;
- memcpy(stack->entries, entries, size * sizeof(unsigned long));
+ memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
depot_offset += required_size;
return stack;
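For reference, the two helpers compute roughly the same quantities as the removed open-coded arithmetic, but with overflow protection:

	/* struct_size(stack, entries, size)	~= sizeof(*stack) + size * sizeof(stack->entries[0])
	 * flex_array_size(stack, entries, size) ~= size * sizeof(stack->entries[0])
	 * both saturating to SIZE_MAX if the multiplication would overflow.
	 */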
@@ -155,8 +154,8 @@ static struct stack_record *stack_table[STACK_HASH_SIZE] = {
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
return jhash2((u32 *)entries,
- size * sizeof(unsigned long) / sizeof(u32),
- STACK_HASH_SEED);
+ array_size(size, sizeof(*entries)) / sizeof(u32),
+ STACK_HASH_SEED);
}
/* Use our own, non-instrumented version of memcmp().
diff --git a/lib/string.c b/lib/string.c
index 4288e0158d47..7548eb715ddb 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -85,7 +85,6 @@ EXPORT_SYMBOL(strcasecmp);
* @dest: Where to copy the string to
* @src: Where to copy the string from
*/
-#undef strcpy
char *strcpy(char *dest, const char *src)
{
char *tmp = dest;
@@ -302,7 +301,6 @@ EXPORT_SYMBOL(stpcpy);
* @dest: The string to be appended to
* @src: The string to append to it
*/
-#undef strcat
char *strcat(char *dest, const char *src)
{
char *tmp = dest;
@@ -378,7 +376,6 @@ EXPORT_SYMBOL(strlcat);
* @cs: One string
* @ct: Another string
*/
-#undef strcmp
int strcmp(const char *cs, const char *ct)
{
unsigned char c1, c2;
@@ -958,7 +955,6 @@ EXPORT_SYMBOL(memcmp);
* while this particular implementation is a simple (tail) call to memcmp, do
* not rely on anything but whether the return value is zero or non-zero.
*/
-#undef bcmp
int bcmp(const void *a, const void *b, size_t len)
{
return memcmp(a, b, len);
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 34696a348864..122d8d0e253c 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/export.h>
+#include <linux/fault-inject-usercopy.h>
#include <linux/kasan-checks.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
@@ -34,17 +35,32 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src,
goto byte_at_a_time;
while (max >= sizeof(unsigned long)) {
- unsigned long c, data;
+ unsigned long c, data, mask;
/* Fall back to byte-at-a-time if we get a page fault */
unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
- *(unsigned long *)(dst+res) = c;
+ /*
+ * Note that we mask out the bytes following the NUL. This is
+ * important to do because string oblivious code may read past
+ * the NUL. For those routines, we don't want to give them
+ * potentially random bytes after the NUL in `src`.
+ *
+ * One example of such code is BPF map keys. BPF treats map keys
+ * as an opaque set of bytes. Without the post-NUL mask, any BPF
+ * maps keyed by strings returned from strncpy_from_user() may
+ * have multiple entries for semantically identical strings.
+ */
if (has_zero(c, &data, &constants)) {
data = prep_zero_mask(c, data, &constants);
data = create_zero_mask(data);
+ mask = zero_bytemask(data);
+ *(unsigned long *)(dst+res) = c & mask;
return res + find_zero(data);
}
+
+ *(unsigned long *)(dst+res) = c;
+
res += sizeof(unsigned long);
max -= sizeof(unsigned long);
}
@@ -99,6 +115,8 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
unsigned long max_addr, src_addr;
might_fault();
+ if (should_fail_usercopy())
+ return -EFAULT;
if (unlikely(count <= 0))
return 0;
diff --git a/lib/syscall.c b/lib/syscall.c
index fb328e7ccb08..ba13e924c430 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -7,6 +7,7 @@
static int collect_syscall(struct task_struct *target, struct syscall_info *info)
{
+ unsigned long args[6] = { };
struct pt_regs *regs;
if (!try_get_task_stack(target)) {
@@ -27,8 +28,14 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info
info->data.nr = syscall_get_nr(target, regs);
if (info->data.nr != -1L)
- syscall_get_arguments(target, regs,
- (unsigned long *)&info->data.args[0]);
+ syscall_get_arguments(target, regs, args);
+
+ info->data.args[0] = args[0];
+ info->data.args[1] = args[1];
+ info->data.args[2] = args[2];
+ info->data.args[3] = args[3];
+ info->data.args[4] = args[4];
+ info->data.args[5] = args[5];
put_task_stack(target);
return 0;
@@ -44,7 +51,7 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info
* .data.instruction_pointer - filled with user PC
*
* If @target is blocked in a system call, returns zero with @info.data.nr
- * set to the the call's number and @info.data.args filled in with its
+ * set to the call's number and @info.data.args filled in with its
* arguments. Registers not used for system call arguments may not be available
* and it is not kosher to use &struct user_regset calls while the system
* call is still in progress. Note we may get this result if @target
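For context, the kerneldoc above documents the exported wrapper around collect_syscall() (task_current_syscall(), assumed here from the lib/syscall.c API). A hedged sketch of a caller that uses only the fields named in the comment; the function, includes and message format are illustrative assumptions:

#include <linux/printk.h>
#include <linux/sched.h>

/* Illustrative only: report the system call @task is currently blocked in. */
static void report_blocked_syscall(struct task_struct *task)
{
        struct syscall_info info;

        if (task_current_syscall(task, &info))
                return;                 /* no stable answer, e.g. task is exiting */

        if (info.data.nr == -1)
                pr_info("%s: not in a system call\n", task->comm);
        else
                pr_info("%s: syscall %d, arg0 0x%llx\n", task->comm,
                        info.data.nr, (unsigned long long)info.data.args[0]);
}
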
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index df903c53952b..0ea0e8258f14 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -16,8 +16,7 @@
#include "../tools/testing/selftests/kselftest_module.h"
-static unsigned total_tests __initdata;
-static unsigned failed_tests __initdata;
+KSTM_MODULE_GLOBALS();
static char pbl_buffer[PAGE_SIZE] __initdata;
@@ -354,50 +353,37 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
};
-static void __init __test_bitmap_parselist(int is_user)
+static void __init test_bitmap_parselist(void)
{
int i;
int err;
ktime_t time;
DECLARE_BITMAP(bmap, 2048);
- char *mode = is_user ? "_user" : "";
for (i = 0; i < ARRAY_SIZE(parselist_tests); i++) {
#define ptest parselist_tests[i]
- if (is_user) {
- mm_segment_t orig_fs = get_fs();
- size_t len = strlen(ptest.in);
-
- set_fs(KERNEL_DS);
- time = ktime_get();
- err = bitmap_parselist_user((__force const char __user *)ptest.in, len,
- bmap, ptest.nbits);
- time = ktime_get() - time;
- set_fs(orig_fs);
- } else {
- time = ktime_get();
- err = bitmap_parselist(ptest.in, bmap, ptest.nbits);
- time = ktime_get() - time;
- }
+ time = ktime_get();
+ err = bitmap_parselist(ptest.in, bmap, ptest.nbits);
+ time = ktime_get() - time;
if (err != ptest.errno) {
- pr_err("parselist%s: %d: input is %s, errno is %d, expected %d\n",
- mode, i, ptest.in, err, ptest.errno);
+ pr_err("parselist: %d: input is %s, errno is %d, expected %d\n",
+ i, ptest.in, err, ptest.errno);
continue;
}
if (!err && ptest.expected
&& !__bitmap_equal(bmap, ptest.expected, ptest.nbits)) {
- pr_err("parselist%s: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
- mode, i, ptest.in, bmap[0],
+ pr_err("parselist: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
+ i, ptest.in, bmap[0],
*ptest.expected);
continue;
}
if (ptest.flags & PARSE_TIME)
- pr_err("parselist%s: %d: input is '%s' OK, Time: %llu\n",
- mode, i, ptest.in, time);
+ pr_err("parselist: %d: input is '%s' OK, Time: %llu\n",
+ i, ptest.in, time);
#undef ptest
}
@@ -443,75 +429,41 @@ static const struct test_bitmap_parselist parse_tests[] __initconst = {
#undef step
};
-static void __init __test_bitmap_parse(int is_user)
+static void __init test_bitmap_parse(void)
{
int i;
int err;
ktime_t time;
DECLARE_BITMAP(bmap, 2048);
- char *mode = is_user ? "_user" : "";
for (i = 0; i < ARRAY_SIZE(parse_tests); i++) {
struct test_bitmap_parselist test = parse_tests[i];
+ size_t len = test.flags & NO_LEN ? UINT_MAX : strlen(test.in);
- if (is_user) {
- size_t len = strlen(test.in);
- mm_segment_t orig_fs = get_fs();
-
- set_fs(KERNEL_DS);
- time = ktime_get();
- err = bitmap_parse_user((__force const char __user *)test.in, len,
- bmap, test.nbits);
- time = ktime_get() - time;
- set_fs(orig_fs);
- } else {
- size_t len = test.flags & NO_LEN ?
- UINT_MAX : strlen(test.in);
- time = ktime_get();
- err = bitmap_parse(test.in, len, bmap, test.nbits);
- time = ktime_get() - time;
- }
+ time = ktime_get();
+ err = bitmap_parse(test.in, len, bmap, test.nbits);
+ time = ktime_get() - time;
if (err != test.errno) {
- pr_err("parse%s: %d: input is %s, errno is %d, expected %d\n",
- mode, i, test.in, err, test.errno);
+ pr_err("parse: %d: input is %s, errno is %d, expected %d\n",
+ i, test.in, err, test.errno);
continue;
}
if (!err && test.expected
&& !__bitmap_equal(bmap, test.expected, test.nbits)) {
- pr_err("parse%s: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
- mode, i, test.in, bmap[0],
+ pr_err("parse: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
+ i, test.in, bmap[0],
*test.expected);
continue;
}
if (test.flags & PARSE_TIME)
- pr_err("parse%s: %d: input is '%s' OK, Time: %llu\n",
- mode, i, test.in, time);
+ pr_err("parse: %d: input is '%s' OK, Time: %llu\n",
+ i, test.in, time);
}
}
-static void __init test_bitmap_parselist(void)
-{
- __test_bitmap_parselist(0);
-}
-
-static void __init test_bitmap_parselist_user(void)
-{
- __test_bitmap_parselist(1);
-}
-
-static void __init test_bitmap_parse(void)
-{
- __test_bitmap_parse(0);
-}
-
-static void __init test_bitmap_parse_user(void)
-{
- __test_bitmap_parse(1);
-}
-
#define EXP1_IN_BITS (sizeof(exp1) * 8)
static void __init test_bitmap_arr32(void)
@@ -675,9 +627,7 @@ static void __init selftest(void)
test_replace();
test_bitmap_arr32();
test_bitmap_parse();
- test_bitmap_parse_user();
test_bitmap_parselist();
- test_bitmap_parselist_user();
test_mem_optimisations();
test_for_each_set_clump8();
test_bitmap_cut();
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index ca7d635bccd9..4dc4dcbecd12 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -345,7 +345,7 @@ static int __bpf_fill_ja(struct bpf_test *self, unsigned int len,
static int bpf_fill_maxinsns11(struct bpf_test *self)
{
- /* Hits 70 passes on x86_64, so cannot get JITed there. */
+ /* Hits 70 passes on x86_64 and triggers NOPs padding. */
return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
}
@@ -4295,13 +4295,13 @@ static struct bpf_test tests[] = {
{ { 0, 0xffffffff } },
.stack_depth = 40,
},
- /* BPF_STX | BPF_XADD | BPF_W/DW */
+ /* BPF_STX | BPF_ATOMIC | BPF_W/DW */
{
"STX_XADD_W: Test: 0x12 + 0x10 = 0x22",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
BPF_ST_MEM(BPF_W, R10, -40, 0x10),
- BPF_STX_XADD(BPF_W, R10, R0, -40),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
BPF_LDX_MEM(BPF_W, R0, R10, -40),
BPF_EXIT_INSN(),
},
@@ -4316,7 +4316,7 @@ static struct bpf_test tests[] = {
BPF_ALU64_REG(BPF_MOV, R1, R10),
BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
BPF_ST_MEM(BPF_W, R10, -40, 0x10),
- BPF_STX_XADD(BPF_W, R10, R0, -40),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
BPF_ALU64_REG(BPF_MOV, R0, R10),
BPF_ALU64_REG(BPF_SUB, R0, R1),
BPF_EXIT_INSN(),
@@ -4331,7 +4331,7 @@ static struct bpf_test tests[] = {
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
BPF_ST_MEM(BPF_W, R10, -40, 0x10),
- BPF_STX_XADD(BPF_W, R10, R0, -40),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
@@ -4352,7 +4352,7 @@ static struct bpf_test tests[] = {
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
- BPF_STX_XADD(BPF_DW, R10, R0, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
BPF_LDX_MEM(BPF_DW, R0, R10, -40),
BPF_EXIT_INSN(),
},
@@ -4367,7 +4367,7 @@ static struct bpf_test tests[] = {
BPF_ALU64_REG(BPF_MOV, R1, R10),
BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
- BPF_STX_XADD(BPF_DW, R10, R0, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
BPF_ALU64_REG(BPF_MOV, R0, R10),
BPF_ALU64_REG(BPF_SUB, R0, R1),
BPF_EXIT_INSN(),
@@ -4382,7 +4382,7 @@ static struct bpf_test tests[] = {
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
- BPF_STX_XADD(BPF_DW, R10, R0, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
@@ -5318,15 +5318,10 @@ static struct bpf_test tests[] = {
{
"BPF_MAXINSNS: Jump, gap, jump, ...",
{ },
-#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_X86)
- CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
-#else
CLASSIC | FLAG_NO_DATA,
-#endif
{ },
{ { 0, 0xababcbac } },
.fill_helper = bpf_fill_maxinsns11,
- .expected_errcode = -ENOTSUPP,
},
{
"BPF_MAXINSNS: jump over MSH",
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 2baa275a6ddf..b6fe89add9fe 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -364,18 +364,15 @@ static ssize_t test_dev_config_show_int(char *buf, int val)
static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
{
+ u8 val;
int ret;
- long new;
- ret = kstrtol(buf, 10, &new);
+ ret = kstrtou8(buf, 10, &val);
if (ret)
return ret;
- if (new > U8_MAX)
- return -EINVAL;
-
mutex_lock(&test_fw_mutex);
- *(u8 *)cfg = new;
+ *(u8 *)cfg = val;
mutex_unlock(&test_fw_mutex);
/* Always return full write size even if we didn't consume all */
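The conversion above (mirrored for uint and int in lib/test_kmod.c further down) leans on the fixed-width kstrto*() helpers to do the range checking that the old kstrtol()-plus-compare code did by hand. A minimal sketch of the resulting pattern (the function name is illustrative):

#include <linux/kernel.h>

/* kstrtou8() itself rejects out-of-range input, e.g. "256" yields -ERANGE. */
static int parse_u8_setting(const char *buf, u8 *out)
{
        u8 val;
        int ret;

        ret = kstrtou8(buf, 10, &val);
        if (ret)
                return ret;

        *out = val;
        return 0;
}
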
diff --git a/lib/test_fpu.c b/lib/test_fpu.c
index c33764aa3eb8..e82db19fed84 100644
--- a/lib/test_fpu.c
+++ b/lib/test_fpu.c
@@ -63,7 +63,7 @@ static int test_fpu_get(void *data, u64 *val)
return status;
}
-DEFINE_SIMPLE_ATTRIBUTE(test_fpu_fops, test_fpu_get, NULL, "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(test_fpu_fops, test_fpu_get, NULL, "%lld\n");
static struct dentry *selftest_dir;
static int __init test_fpu_init(void)
@@ -72,8 +72,8 @@ static int __init test_fpu_init(void)
if (!selftest_dir)
return -ENOMEM;
- debugfs_create_file("test_fpu", 0444, selftest_dir, NULL,
- &test_fpu_fops);
+ debugfs_create_file_unsafe("test_fpu", 0444, selftest_dir, NULL,
+ &test_fpu_fops);
return 0;
}
diff --git a/lib/test_free_pages.c b/lib/test_free_pages.c
index 074e76bd76b2..25ae1ac2624a 100644
--- a/lib/test_free_pages.c
+++ b/lib/test_free_pages.c
@@ -5,6 +5,8 @@
* Author: Matthew Wilcox <willy@infradead.org>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -26,8 +28,11 @@ static void test_free_pages(gfp_t gfp)
static int m_in(void)
{
+ pr_info("Testing with GFP_KERNEL\n");
test_free_pages(GFP_KERNEL);
+ pr_info("Testing with GFP_KERNEL | __GFP_COMP\n");
test_free_pages(GFP_KERNEL | __GFP_COMP);
+ pr_info("Test completed\n");
return 0;
}
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index e151a7f10519..80a78877bd93 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -461,7 +461,7 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
if (!devmem)
- return -ENOMEM;
+ return false;
res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
"hmm_dmirror");
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 63c26171a791..2947274cc2d3 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -25,7 +25,7 @@
#include "../mm/kasan/kasan.h"
-#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
+#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
/*
* We assign some test results to these globals to make sure the tests
@@ -216,6 +216,12 @@ static void kmalloc_oob_16(struct kunit *test)
u64 words[2];
} *ptr1, *ptr2;
+ /* This test is specifically crafted for the generic mode. */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+ kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
+ return;
+ }
+
ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
@@ -227,6 +233,23 @@ static void kmalloc_oob_16(struct kunit *test)
kfree(ptr2);
}
+static void kmalloc_uaf_16(struct kunit *test)
+{
+ struct {
+ u64 words[2];
+ } *ptr1, *ptr2;
+
+ ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
+
+ ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
+ kfree(ptr2);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
+ kfree(ptr1);
+}
+
static void kmalloc_oob_memset_2(struct kunit *test)
{
char *ptr;
@@ -429,6 +452,12 @@ static void kasan_global_oob(struct kunit *test)
volatile int i = 3;
char *p = &global_array[ARRAY_SIZE(global_array) + i];
+ /* Only generic mode instruments globals. */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+ kunit_info(test, "CONFIG_KASAN_GENERIC required");
+ return;
+ }
+
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
@@ -467,6 +496,12 @@ static void kasan_alloca_oob_left(struct kunit *test)
char alloca_array[i];
char *p = alloca_array - 1;
+ /* Only generic mode instruments dynamic allocas. */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+ kunit_info(test, "CONFIG_KASAN_GENERIC required");
+ return;
+ }
+
if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
return;
@@ -481,6 +516,12 @@ static void kasan_alloca_oob_right(struct kunit *test)
char alloca_array[i];
char *p = alloca_array + i;
+ /* Only generic mode instruments dynamic allocas. */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+ kunit_info(test, "CONFIG_KASAN_GENERIC required");
+ return;
+ }
+
if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
return;
@@ -551,6 +592,9 @@ static void kasan_memchr(struct kunit *test)
return;
}
+ if (OOB_TAG_OFF)
+ size = round_up(size, OOB_TAG_OFF);
+
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -573,6 +617,9 @@ static void kasan_memcmp(struct kunit *test)
return;
}
+ if (OOB_TAG_OFF)
+ size = round_up(size, OOB_TAG_OFF);
+
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
memset(arr, 0, sizeof(arr));
@@ -619,13 +666,50 @@ static void kasan_strings(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}
-static void kasan_bitops(struct kunit *test)
+static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
+{
+ KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
+}
+
+static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
+{
+ KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
+
+#if defined(clear_bit_unlock_is_negative_byte)
+ KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
+ clear_bit_unlock_is_negative_byte(nr, addr));
+#endif
+}
+
+static void kasan_bitops_generic(struct kunit *test)
{
+ long *bits;
+
+ /* This test is specifically crafted for the generic mode. */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+ kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
+ return;
+ }
+
/*
* Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
* this way we do not actually corrupt other memory.
*/
- long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
+ bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
/*
@@ -633,55 +717,34 @@ static void kasan_bitops(struct kunit *test)
* below accesses are still out-of-bounds, since bitops are defined to
* operate on the whole long the bit is in.
*/
- KUNIT_EXPECT_KASAN_FAIL(test, set_bit(BITS_PER_LONG, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(BITS_PER_LONG, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(BITS_PER_LONG, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(BITS_PER_LONG, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(BITS_PER_LONG, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(BITS_PER_LONG, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test, change_bit(BITS_PER_LONG, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(BITS_PER_LONG, bits));
+ kasan_bitops_modify(test, BITS_PER_LONG, bits);
/*
* Below calls try to access bit beyond allocated memory.
*/
- KUNIT_EXPECT_KASAN_FAIL(test,
- test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test,
- __test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
-
- KUNIT_EXPECT_KASAN_FAIL(test,
- test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits));
+ kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
- KUNIT_EXPECT_KASAN_FAIL(test,
- test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
+ kfree(bits);
+}
- KUNIT_EXPECT_KASAN_FAIL(test,
- __test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
+static void kasan_bitops_tags(struct kunit *test)
+{
+ long *bits;
- KUNIT_EXPECT_KASAN_FAIL(test,
- test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
+ /* This test is specifically crafted for the tag-based mode. */
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+ kunit_info(test, "CONFIG_KASAN_SW_TAGS required\n");
+ return;
+ }
- KUNIT_EXPECT_KASAN_FAIL(test,
- __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
+ /* Allocation size will be rounded up to granule size, which is 16. */
+ bits = kzalloc(sizeof(*bits), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
- KUNIT_EXPECT_KASAN_FAIL(test,
- kasan_int_result =
- test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
+ /* Do the accesses past the 16 allocated bytes. */
+ kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]);
+ kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]);
-#if defined(clear_bit_unlock_is_negative_byte)
- KUNIT_EXPECT_KASAN_FAIL(test,
- kasan_int_result = clear_bit_unlock_is_negative_byte(
- BITS_PER_LONG + BITS_PER_BYTE, bits));
-#endif
kfree(bits);
}
@@ -728,6 +791,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_oob_krealloc_more),
KUNIT_CASE(kmalloc_oob_krealloc_less),
KUNIT_CASE(kmalloc_oob_16),
+ KUNIT_CASE(kmalloc_uaf_16),
KUNIT_CASE(kmalloc_oob_in_memset),
KUNIT_CASE(kmalloc_oob_memset_2),
KUNIT_CASE(kmalloc_oob_memset_4),
@@ -751,7 +815,8 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kasan_memchr),
KUNIT_CASE(kasan_memcmp),
KUNIT_CASE(kasan_strings),
- KUNIT_CASE(kasan_bitops),
+ KUNIT_CASE(kasan_bitops_generic),
+ KUNIT_CASE(kasan_bitops_tags),
KUNIT_CASE(kmalloc_double_kzfree),
KUNIT_CASE(vmalloc_oob),
{}
diff --git a/lib/test_kasan_module.c b/lib/test_kasan_module.c
index 2d68db6ae67b..3b4cc77992d2 100644
--- a/lib/test_kasan_module.c
+++ b/lib/test_kasan_module.c
@@ -15,7 +15,7 @@
#include "../mm/kasan/kasan.h"
-#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
+#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
static noinline void __init copy_user_test(void)
{
@@ -91,6 +91,34 @@ static noinline void __init kasan_rcu_uaf(void)
call_rcu(&global_rcu_ptr->rcu, kasan_rcu_reclaim);
}
+static noinline void __init kasan_workqueue_work(struct work_struct *work)
+{
+ kfree(work);
+}
+
+static noinline void __init kasan_workqueue_uaf(void)
+{
+ struct workqueue_struct *workqueue;
+ struct work_struct *work;
+
+ workqueue = create_workqueue("kasan_wq_test");
+ if (!workqueue) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+ work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
+ if (!work) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ INIT_WORK(work, kasan_workqueue_work);
+ queue_work(workqueue, work);
+ destroy_workqueue(workqueue);
+
+ pr_info("use-after-free on workqueue\n");
+ ((volatile struct work_struct *)work)->data;
+}
static int __init test_kasan_module_init(void)
{
@@ -102,6 +130,7 @@ static int __init test_kasan_module_init(void)
copy_user_test();
kasan_rcu_uaf();
+ kasan_workqueue_uaf();
kasan_restore_multi_shot(multishot);
return -EAGAIN;
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index eab52770070d..38c250fbace3 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -877,20 +877,17 @@ static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
int (*test_sync)(struct kmod_test_device *test_dev))
{
int ret;
- unsigned long new;
+ unsigned int val;
unsigned int old_val;
- ret = kstrtoul(buf, 10, &new);
+ ret = kstrtouint(buf, 10, &val);
if (ret)
return ret;
- if (new > UINT_MAX)
- return -EINVAL;
-
mutex_lock(&test_dev->config_mutex);
old_val = *config;
- *(unsigned int *)config = new;
+ *(unsigned int *)config = val;
ret = test_sync(test_dev);
if (ret) {
@@ -914,18 +911,18 @@ static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
unsigned int min,
unsigned int max)
{
+ unsigned int val;
int ret;
- unsigned long new;
- ret = kstrtoul(buf, 10, &new);
+ ret = kstrtouint(buf, 10, &val);
if (ret)
return ret;
- if (new < min || new > max)
+ if (val < min || val > max)
return -EINVAL;
mutex_lock(&test_dev->config_mutex);
- *config = new;
+ *config = val;
mutex_unlock(&test_dev->config_mutex);
/* Always return full write size even if we didn't consume all */
@@ -936,18 +933,15 @@ static int test_dev_config_update_int(struct kmod_test_device *test_dev,
const char *buf, size_t size,
int *config)
{
+ int val;
int ret;
- long new;
- ret = kstrtol(buf, 10, &new);
+ ret = kstrtoint(buf, 10, &val);
if (ret)
return ret;
- if (new < INT_MIN || new > INT_MAX)
- return -EINVAL;
-
mutex_lock(&test_dev->config_mutex);
- *config = new;
+ *config = val;
mutex_unlock(&test_dev->config_mutex);
/* Always return full write size even if we didn't consume all */
return size;
diff --git a/lib/test_lockup.c b/lib/test_lockup.c
index f1a020bcc763..864554e76973 100644
--- a/lib/test_lockup.c
+++ b/lib/test_lockup.c
@@ -480,6 +480,21 @@ static int __init test_lockup_init(void)
return -EINVAL;
#ifdef CONFIG_DEBUG_SPINLOCK
+#ifdef CONFIG_PREEMPT_RT
+ if (test_magic(lock_spinlock_ptr,
+ offsetof(spinlock_t, lock.wait_lock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_rwlock_ptr,
+ offsetof(rwlock_t, rtmutex.wait_lock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_mutex_ptr,
+ offsetof(struct mutex, lock.wait_lock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_rwsem_ptr,
+ offsetof(struct rw_semaphore, rtmutex.wait_lock.magic),
+ SPINLOCK_MAGIC))
+ return -EINVAL;
+#else
if (test_magic(lock_spinlock_ptr,
offsetof(spinlock_t, rlock.magic),
SPINLOCK_MAGIC) ||
@@ -494,6 +509,7 @@ static int __init test_lockup_init(void)
SPINLOCK_MAGIC))
return -EINVAL;
#endif
+#endif
if ((wait_state != TASK_RUNNING ||
(call_cond_resched && !reacquire_locks) ||
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 7ac87f18a10f..95a2f82427c7 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -30,11 +30,13 @@
#define PAD_SIZE 16
#define FILL_CHAR '$'
-static unsigned total_tests __initdata;
-static unsigned failed_tests __initdata;
+KSTM_MODULE_GLOBALS();
+
static char *test_buffer __initdata;
static char *alloced_buffer __initdata;
+extern bool no_hash_pointers;
+
static int __printf(4, 0) __init
do_test(int bufsize, const char *expect, int elen,
const char *fmt, va_list ap)
@@ -301,6 +303,12 @@ plain(void)
{
int err;
+ if (no_hash_pointers) {
+ pr_warn("skipping plain 'p' tests");
+ skipped_tests += 2;
+ return;
+ }
+
err = plain_hash();
if (err) {
pr_warn("plain 'p' does not appear to be hashed\n");
@@ -644,9 +652,7 @@ static void __init fwnode_pointer(void)
test(second_name, "%pfwP", software_node_fwnode(&softnodes[1]));
test(third_name, "%pfwP", software_node_fwnode(&softnodes[2]));
- software_node_unregister(&softnodes[2]);
- software_node_unregister(&softnodes[1]);
- software_node_unregister(&softnodes[0]);
+ software_node_unregister_nodes(softnodes);
}
static void __init
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index 98bc92a91662..3750323973f4 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -16,7 +16,7 @@
*/
/*
- * This module provides an interface to the the proc sysctl interfaces. This
+ * This module provides an interface to the proc sysctl interfaces. This
* driver requires CONFIG_PROC_SYSCTL. It will not normally be loaded by the
* system unless explicitly requested by name. You can also build this driver
* into your kernel.
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 9ea10adf7a66..5e5d9355ef49 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -5,32 +5,54 @@
typedef void(*test_ubsan_fp)(void);
+#define UBSAN_TEST(config, ...) do { \
+ pr_info("%s " __VA_ARGS__ "%s(%s=%s)\n", __func__, \
+ sizeof(" " __VA_ARGS__) > 2 ? " " : "", \
+ #config, IS_ENABLED(config) ? "y" : "n"); \
+ } while (0)
+
static void test_ubsan_add_overflow(void)
{
volatile int val = INT_MAX;
+ volatile unsigned int uval = UINT_MAX;
+ UBSAN_TEST(CONFIG_UBSAN_SIGNED_OVERFLOW);
val += 2;
+
+ UBSAN_TEST(CONFIG_UBSAN_UNSIGNED_OVERFLOW);
+ uval += 2;
}
static void test_ubsan_sub_overflow(void)
{
volatile int val = INT_MIN;
+ volatile unsigned int uval = 0;
volatile int val2 = 2;
+ UBSAN_TEST(CONFIG_UBSAN_SIGNED_OVERFLOW);
val -= val2;
+
+ UBSAN_TEST(CONFIG_UBSAN_UNSIGNED_OVERFLOW);
+ uval -= val2;
}
static void test_ubsan_mul_overflow(void)
{
volatile int val = INT_MAX / 2;
+ volatile unsigned int uval = UINT_MAX / 2;
+ UBSAN_TEST(CONFIG_UBSAN_SIGNED_OVERFLOW);
val *= 3;
+
+ UBSAN_TEST(CONFIG_UBSAN_UNSIGNED_OVERFLOW);
+ uval *= 3;
}
static void test_ubsan_negate_overflow(void)
{
volatile int val = INT_MIN;
+ UBSAN_TEST(CONFIG_UBSAN_SIGNED_OVERFLOW);
val = -val;
}
@@ -39,37 +61,67 @@ static void test_ubsan_divrem_overflow(void)
volatile int val = 16;
volatile int val2 = 0;
+ UBSAN_TEST(CONFIG_UBSAN_DIV_ZERO);
val /= val2;
}
static void test_ubsan_shift_out_of_bounds(void)
{
- volatile int val = -1;
- int val2 = 10;
+ volatile int neg = -1, wrap = 4;
+ int val1 = 10;
+ int val2 = INT_MAX;
+
+ UBSAN_TEST(CONFIG_UBSAN_SHIFT, "negative exponent");
+ val1 <<= neg;
- val2 <<= val;
+ UBSAN_TEST(CONFIG_UBSAN_SHIFT, "left overflow");
+ val2 <<= wrap;
}
static void test_ubsan_out_of_bounds(void)
{
- volatile int i = 4, j = 5;
+ volatile int i = 4, j = 5, k = -1;
+ volatile char above[4] = { }; /* Protect surrounding memory. */
volatile int arr[4];
+ volatile char below[4] = { }; /* Protect surrounding memory. */
+ above[0] = below[0];
+
+ UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "above");
arr[j] = i;
+
+ UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "below");
+ arr[k] = i;
}
+enum ubsan_test_enum {
+ UBSAN_TEST_ZERO = 0,
+ UBSAN_TEST_ONE,
+ UBSAN_TEST_MAX,
+};
+
static void test_ubsan_load_invalid_value(void)
{
volatile char *dst, *src;
bool val, val2, *ptr;
- char c = 4;
+ enum ubsan_test_enum eval, eval2, *eptr;
+ unsigned char c = 0xff;
+ UBSAN_TEST(CONFIG_UBSAN_BOOL, "bool");
dst = (char *)&val;
src = &c;
*dst = *src;
ptr = &val2;
val2 = val;
+
+ UBSAN_TEST(CONFIG_UBSAN_ENUM, "enum");
+ dst = (char *)&eval;
+ src = &c;
+ *dst = *src;
+
+ eptr = &eval2;
+ eval2 = eval;
}
static void test_ubsan_null_ptr_deref(void)
@@ -77,6 +129,7 @@ static void test_ubsan_null_ptr_deref(void)
volatile int *ptr = NULL;
int val;
+ UBSAN_TEST(CONFIG_UBSAN_OBJECT_SIZE);
val = *ptr;
}
@@ -85,6 +138,7 @@ static void test_ubsan_misaligned_access(void)
volatile char arr[5] __aligned(4) = {1, 2, 3, 4, 5};
volatile int *ptr, val = 6;
+ UBSAN_TEST(CONFIG_UBSAN_ALIGNMENT);
ptr = (int *)(arr + 1);
*ptr = val;
}
@@ -95,6 +149,7 @@ static void test_ubsan_object_size_mismatch(void)
volatile int val __aligned(8) = 4;
volatile long long *ptr, val2;
+ UBSAN_TEST(CONFIG_UBSAN_OBJECT_SIZE);
ptr = (long long *)&val;
val2 = *ptr;
}
@@ -104,15 +159,19 @@ static const test_ubsan_fp test_ubsan_array[] = {
test_ubsan_sub_overflow,
test_ubsan_mul_overflow,
test_ubsan_negate_overflow,
- test_ubsan_divrem_overflow,
test_ubsan_shift_out_of_bounds,
test_ubsan_out_of_bounds,
test_ubsan_load_invalid_value,
- //test_ubsan_null_ptr_deref, /* exclude it because there is a crash */
test_ubsan_misaligned_access,
test_ubsan_object_size_mismatch,
};
+/* Excluded because they Oops the module. */
+static const test_ubsan_fp skip_ubsan_array[] = {
+ test_ubsan_divrem_overflow,
+ test_ubsan_null_ptr_deref,
+};
+
static int __init test_ubsan_init(void)
{
unsigned int i;
@@ -120,7 +179,6 @@ static int __init test_ubsan_init(void)
for (i = 0; i < ARRAY_SIZE(test_ubsan_array); i++)
test_ubsan_array[i]();
- (void)test_ubsan_null_ptr_deref; /* to avoid unsed-function warning */
return 0;
}
module_init(test_ubsan_init);
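The UBSAN_TEST() macro added at the top of this file prints which test is about to run and whether the matching UBSAN option is built in; the sizeof(" " __VA_ARGS__) > 2 check only emits the separating space when an optional description string was passed. A worked expansion (the resulting console lines are inferred from the format string, not copied from a real log):

UBSAN_TEST(CONFIG_UBSAN_SHIFT, "negative exponent");
/*
 * expands to roughly:
 *   pr_info("%s " "negative exponent" "%s(%s=%s)\n", __func__, " ",
 *           "CONFIG_UBSAN_SHIFT", IS_ENABLED(CONFIG_UBSAN_SHIFT) ? "y" : "n");
 * printing, e.g.:
 *   test_ubsan_shift_out_of_bounds negative exponent (CONFIG_UBSAN_SHIFT=y)
 */

UBSAN_TEST(CONFIG_UBSAN_DIV_ZERO);
/*
 * no description, so the optional space collapses to "" and the output is, e.g.:
 *   test_ubsan_divrem_overflow (CONFIG_UBSAN_DIV_ZERO=n)
 */
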
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index d4f97925dbd8..8294f43f4981 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -289,6 +289,27 @@ static noinline void check_xa_mark_2(struct xarray *xa)
xa_destroy(xa);
}
+static noinline void check_xa_mark_3(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ XA_STATE(xas, xa, 0x41);
+ void *entry;
+ int count = 0;
+
+ xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
+ xa_set_mark(xa, 0x41, XA_MARK_0);
+
+ rcu_read_lock();
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
+ count++;
+ XA_BUG_ON(xa, entry != xa_mk_index(0x40));
+ }
+ XA_BUG_ON(xa, count != 1);
+ rcu_read_unlock();
+ xa_destroy(xa);
+#endif
+}
+
static noinline void check_xa_mark(struct xarray *xa)
{
unsigned long index;
@@ -297,6 +318,7 @@ static noinline void check_xa_mark(struct xarray *xa)
check_xa_mark_1(xa, index);
check_xa_mark_2(xa);
+ check_xa_mark_3(xa);
}
static noinline void check_xa_shrink(struct xarray *xa)
@@ -393,6 +415,9 @@ static noinline void check_cmpxchg(struct xarray *xa)
XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
+ XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
xa_erase_index(xa, 12345678);
xa_erase_index(xa, 5);
XA_BUG_ON(xa, !xa_empty(xa));
@@ -1503,6 +1528,49 @@ static noinline void check_store_range(struct xarray *xa)
}
}
+#ifdef CONFIG_XARRAY_MULTI
+static void check_split_1(struct xarray *xa, unsigned long index,
+ unsigned int order)
+{
+ XA_STATE(xas, xa, index);
+ void *entry;
+ unsigned int i = 0;
+
+ xa_store_order(xa, index, order, xa, GFP_KERNEL);
+
+ xas_split_alloc(&xas, xa, order, GFP_KERNEL);
+ xas_lock(&xas);
+ xas_split(&xas, xa, order);
+ xas_unlock(&xas);
+
+ xa_for_each(xa, index, entry) {
+ XA_BUG_ON(xa, entry != xa);
+ i++;
+ }
+ XA_BUG_ON(xa, i != 1 << order);
+
+ xa_set_mark(xa, index, XA_MARK_0);
+ XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
+
+ xa_destroy(xa);
+}
+
+static noinline void check_split(struct xarray *xa)
+{
+ unsigned int order;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
+ check_split_1(xa, 0, order);
+ check_split_1(xa, 1UL << order, order);
+ check_split_1(xa, 3UL << order, order);
+ }
+}
+#else
+static void check_split(struct xarray *xa) { }
+#endif
+
static void check_align_1(struct xarray *xa, char *name)
{
int i;
@@ -1575,14 +1643,9 @@ static noinline void shadow_remove(struct xarray *xa)
xa_lock(xa);
while ((node = list_first_entry_or_null(&shadow_nodes,
struct xa_node, private_list))) {
- XA_STATE(xas, node->array, 0);
XA_BUG_ON(xa, node->array != xa);
list_del_init(&node->private_list);
- xas.xa_node = xa_parent_locked(node->array, node);
- xas.xa_offset = node->offset;
- xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
- xas_set_update(&xas, test_update_node);
- xas_store(&xas, NULL);
+ xa_delete_node(node, test_update_node);
}
xa_unlock(xa);
}
@@ -1649,6 +1712,26 @@ static noinline void check_account(struct xarray *xa)
#endif
}
+static noinline void check_get_order(struct xarray *xa)
+{
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+ unsigned int order;
+ unsigned long i, j;
+
+ for (i = 0; i < 3; i++)
+ XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
+
+ for (order = 0; order < max_order; order++) {
+ for (i = 0; i < 10; i++) {
+ xa_store_order(xa, i << order, order,
+ xa_mk_index(i << order), GFP_KERNEL);
+ for (j = i << order; j < (i + 1) << order; j++)
+ XA_BUG_ON(xa, xa_get_order(xa, j) != order);
+ xa_erase(xa, i << order);
+ }
+ }
+}
+
static noinline void check_destroy(struct xarray *xa)
{
unsigned long index;
@@ -1697,6 +1780,7 @@ static int xarray_checks(void)
check_reserve(&array);
check_reserve(&xa0);
check_multi_store(&array);
+ check_get_order(&array);
check_xa_alloc();
check_find(&array);
check_find_entry(&array);
@@ -1708,6 +1792,7 @@ static int xarray_checks(void)
check_store_range(&array);
check_store_iter(&array);
check_align(&xa0);
+ check_split(&array);
check_workingset(&array, 0);
check_workingset(&array, 64);
diff --git a/lib/timerqueue.c b/lib/timerqueue.c
index c52710964593..cdb9c7658478 100644
--- a/lib/timerqueue.c
+++ b/lib/timerqueue.c
@@ -14,6 +14,14 @@
#include <linux/rbtree.h>
#include <linux/export.h>
+#define __node_2_tq(_n) \
+ rb_entry((_n), struct timerqueue_node, node)
+
+static inline bool __timerqueue_less(struct rb_node *a, const struct rb_node *b)
+{
+ return __node_2_tq(a)->expires < __node_2_tq(b)->expires;
+}
+
/**
* timerqueue_add - Adds timer to timerqueue.
*
@@ -26,28 +34,10 @@
*/
bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
{
- struct rb_node **p = &head->rb_root.rb_root.rb_node;
- struct rb_node *parent = NULL;
- struct timerqueue_node *ptr;
- bool leftmost = true;
-
/* Make sure we don't add nodes that are already added */
WARN_ON_ONCE(!RB_EMPTY_NODE(&node->node));
- while (*p) {
- parent = *p;
- ptr = rb_entry(parent, struct timerqueue_node, node);
- if (node->expires < ptr->expires) {
- p = &(*p)->rb_left;
- } else {
- p = &(*p)->rb_right;
- leftmost = false;
- }
- }
- rb_link_node(&node->node, parent, p);
- rb_insert_color_cached(&node->node, &head->rb_root, leftmost);
-
- return leftmost;
+ return rb_add_cached(&node->node, &head->rb_root, __timerqueue_less);
}
EXPORT_SYMBOL_GPL(timerqueue_add);
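The open-coded descent above is replaced by the generic rb_add_cached() helper, which needs only a less() comparator and keeps the cached leftmost node up to date itself. The same pattern works for any rb_root_cached user; a hedged sketch with illustrative names (only struct rb_root_cached, rb_entry() and rb_add_cached() from <linux/rbtree.h> are assumed):

#include <linux/rbtree.h>

struct deadline_node {
        struct rb_node node;
        u64 deadline;
};

#define __node_2_dl(n) rb_entry((n), struct deadline_node, node)

static inline bool deadline_less(struct rb_node *a, const struct rb_node *b)
{
        return __node_2_dl(a)->deadline < __node_2_dl(b)->deadline;
}

/* Insert @dl; the earliest deadline stays cached as the leftmost node. */
static void deadline_add(struct rb_root_cached *root, struct deadline_node *dl)
{
        rb_add_cached(&dl->node, root, deadline_less);
}
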
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index ab749ec10ab5..64fd9015ad80 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -193,7 +193,7 @@ startover:
TOKEN_MISMATCH();
block_idx++;
- /* fall through */
+ fallthrough;
case TS_FSM_ANY:
if (next == NULL)
diff --git a/lib/ubsan.c b/lib/ubsan.c
index cb9af3f6b77e..bec38c64d6a6 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -17,7 +17,7 @@
#include "ubsan.h"
-const char *type_check_kinds[] = {
+static const char * const type_check_kinds[] = {
"load of",
"store to",
"reference binding to",
@@ -427,3 +427,34 @@ void __ubsan_handle_load_invalid_value(void *_data, void *val)
ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
+
+void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset);
+void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset)
+{
+ struct alignment_assumption_data *data = _data;
+ unsigned long real_ptr;
+
+ if (suppress_report(&data->location))
+ return;
+
+ ubsan_prologue(&data->location, "alignment-assumption");
+
+ if (offset)
+ pr_err("assumption of %lu byte alignment (with offset of %lu byte) for pointer of type %s failed",
+ align, offset, data->type->type_name);
+ else
+ pr_err("assumption of %lu byte alignment for pointer of type %s failed",
+ align, data->type->type_name);
+
+ real_ptr = ptr - offset;
+ pr_err("%saddress is %lu aligned, misalignment offset is %lu bytes",
+ offset ? "offset " : "", BIT(real_ptr ? __ffs(real_ptr) : 0),
+ real_ptr & (align - 1));
+
+ ubsan_epilogue();
+}
+EXPORT_SYMBOL(__ubsan_handle_alignment_assumption);
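The new handler fires when an alignment promised through __builtin_assume_aligned() (or the equivalent attributes) turns out to be false under the compiler's alignment instrumentation. A hedged sketch of code that could trip it (illustrative, not part of this patch):

/* Promise 8-byte alignment that the caller might not actually provide. */
static u64 sum_two_words(const void *buf)
{
        const u64 *p = __builtin_assume_aligned(buf, 8);

        return p[0] + p[1];
}

/*
 * With the alignment sanitizer enabled, calling sum_two_words(pkt + 1) on an
 * odd offset would produce a report along the lines of "assumption of 8 byte
 * alignment for pointer of type ... failed" via the handler added above.
 */
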
diff --git a/lib/ubsan.h b/lib/ubsan.h
index 7b56c09473a9..9a0b71c5ff9f 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -78,6 +78,12 @@ struct invalid_value_data {
struct type_descriptor *type;
};
+struct alignment_assumption_data {
+ struct source_location location;
+ struct source_location assumption_location;
+ struct type_descriptor *type;
+};
+
#if defined(CONFIG_ARCH_SUPPORTS_INT128)
typedef __int128 s_max;
typedef unsigned __int128 u_max;
diff --git a/lib/usercopy.c b/lib/usercopy.c
index b26509f112f9..7413dd300516 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
+#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/uaccess.h>
@@ -10,7 +11,7 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
{
unsigned long res = n;
might_fault();
- if (likely(access_ok(from, n))) {
+ if (!should_fail_usercopy() && likely(access_ok(from, n))) {
instrument_copy_from_user(to, from, n);
res = raw_copy_from_user(to, from, n);
}
@@ -25,6 +26,8 @@ EXPORT_SYMBOL(_copy_from_user);
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
+ if (should_fail_usercopy())
+ return n;
if (likely(access_ok(to, n))) {
instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 14c9a6af1b23..41ddc353ebb8 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1265,7 +1265,7 @@ char *mac_address_string(char *buf, char *end, u8 *addr,
case 'R':
reversed = true;
- /* fall through */
+ fallthrough;
default:
separator = ':';
@@ -1682,7 +1682,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
switch (*(++fmt)) {
case 'L':
uc = true;
- /* fall through */
+ fallthrough;
case 'l':
index = guid_index;
break;
@@ -2090,6 +2090,32 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
return widen_string(buf, buf - buf_start, end, spec);
}
+/* Disable pointer hashing if requested */
+bool no_hash_pointers __ro_after_init;
+EXPORT_SYMBOL_GPL(no_hash_pointers);
+
+static int __init no_hash_pointers_enable(char *str)
+{
+ no_hash_pointers = true;
+
+ pr_warn("**********************************************************\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("** **\n");
+ pr_warn("** This system shows unhashed kernel memory addresses **\n");
+ pr_warn("** via the console, logs, and other interfaces. This **\n");
+ pr_warn("** might reduce the security of your system. **\n");
+ pr_warn("** **\n");
+ pr_warn("** If you see this message and you are not debugging **\n");
+ pr_warn("** the kernel, report this immediately to your system **\n");
+ pr_warn("** administrator! **\n");
+ pr_warn("** **\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("**********************************************************\n");
+
+ return 0;
+}
+early_param("no_hash_pointers", no_hash_pointers_enable);
+
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
@@ -2219,7 +2245,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'S':
case 's':
ptr = dereference_symbol_descriptor(ptr);
- /* fall through */
+ fallthrough;
case 'B':
return symbol_string(buf, end, ptr, spec, fmt);
case 'R':
@@ -2297,8 +2323,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
}
}
- /* default is to _not_ leak addresses, hash before printing */
- return ptr_to_id(buf, end, ptr, spec);
+ /*
+ * default is to _not_ leak addresses, so hash before printing,
+ * unless no_hash_pointers is specified on the command line.
+ */
+ if (unlikely(no_hash_pointers))
+ return pointer_string(buf, end, ptr, spec);
+ else
+ return ptr_to_id(buf, end, ptr, spec);
}
/*
@@ -2450,7 +2482,7 @@ qualifier:
case 'x':
spec->flags |= SMALL;
- /* fall through */
+ fallthrough;
case 'X':
spec->base = 16;
@@ -2459,6 +2491,7 @@ qualifier:
case 'd':
case 'i':
spec->flags |= SIGN;
+ break;
case 'u':
break;
@@ -2468,7 +2501,7 @@ qualifier:
* utility, treat it as any other invalid or
* unsupported format specifier.
*/
- /* fall through */
+ fallthrough;
default:
WARN_ONCE(1, "Please remove unsupported %%%c in format string\n", *fmt);
@@ -3411,10 +3444,10 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
break;
case 'i':
base = 0;
- /* fall through */
+ fallthrough;
case 'd':
is_sign = true;
- /* fall through */
+ fallthrough;
case 'u':
break;
case '%':
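Net effect of the vsprintf changes: plain %p keeps printing a hashed value by default, and booting with no_hash_pointers switches it to the raw address (after the warning banner above has been printed once). A hedged illustration with made-up output:

pr_info("ptr = %p\n", ptr);
/* default:               "ptr = 00000000513fd6b9"  (hash, unrelated to the address) */
/* no_hash_pointers set:  "ptr = ffff8881002a3c00"  (the real kernel address)        */
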
diff --git a/lib/xarray.c b/lib/xarray.c
index e9e641d3c0c3..5fa51614802a 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -266,13 +266,14 @@ static void xa_node_free(struct xa_node *node)
*/
static void xas_destroy(struct xa_state *xas)
{
- struct xa_node *node = xas->xa_alloc;
+ struct xa_node *next, *node = xas->xa_alloc;
- if (!node)
- return;
- XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
- kmem_cache_free(radix_tree_node_cachep, node);
- xas->xa_alloc = NULL;
+ while (node) {
+ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+ next = rcu_dereference_raw(node->parent);
+ radix_tree_node_rcu_free(&node->rcu_head);
+ xas->xa_alloc = node = next;
+ }
}
/**
@@ -304,6 +305,7 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp)
xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
if (!xas->xa_alloc)
return false;
+ xas->xa_alloc->parent = NULL;
XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
xas->xa_node = XAS_RESTART;
return true;
@@ -339,6 +341,7 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
}
if (!xas->xa_alloc)
return false;
+ xas->xa_alloc->parent = NULL;
XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
xas->xa_node = XAS_RESTART;
return true;
@@ -403,7 +406,7 @@ static unsigned long xas_size(const struct xa_state *xas)
/*
* Use this to calculate the maximum index that will need to be created
* in order to add the entry described by @xas. Because we cannot store a
- * multiple-index entry at index 0, the calculation is a little more complex
+ * multi-index entry at index 0, the calculation is a little more complex
* than you might expect.
*/
static unsigned long xas_max(struct xa_state *xas)
@@ -703,7 +706,7 @@ void xas_create_range(struct xa_state *xas)
unsigned char shift = xas->xa_shift;
unsigned char sibs = xas->xa_sibs;
- xas->xa_index |= ((sibs + 1) << shift) - 1;
+ xas->xa_index |= ((sibs + 1UL) << shift) - 1;
if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
xas->xa_offset |= sibs;
xas->xa_shift = 0;
@@ -946,6 +949,153 @@ void xas_init_marks(const struct xa_state *xas)
}
EXPORT_SYMBOL_GPL(xas_init_marks);
+#ifdef CONFIG_XARRAY_MULTI
+static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
+{
+ unsigned int marks = 0;
+ xa_mark_t mark = XA_MARK_0;
+
+ for (;;) {
+ if (node_get_mark(node, offset, mark))
+ marks |= 1 << (__force unsigned int)mark;
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
+
+ return marks;
+}
+
+static void node_set_marks(struct xa_node *node, unsigned int offset,
+ struct xa_node *child, unsigned int marks)
+{
+ xa_mark_t mark = XA_MARK_0;
+
+ for (;;) {
+ if (marks & (1 << (__force unsigned int)mark)) {
+ node_set_mark(node, offset, mark);
+ if (child)
+ node_mark_all(child, mark);
+ }
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
+}
+
+/**
+ * xas_split_alloc() - Allocate memory for splitting an entry.
+ * @xas: XArray operation state.
+ * @entry: New entry which will be stored in the array.
+ * @order: New entry order.
+ * @gfp: Memory allocation flags.
+ *
+ * This function should be called before calling xas_split().
+ * If necessary, it will allocate new nodes (and fill them with @entry)
+ * to prepare for the upcoming split of an entry of @order size into
+ * entries of the order stored in the @xas.
+ *
+ * Context: May sleep if @gfp flags permit.
+ */
+void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
+ gfp_t gfp)
+{
+ unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+ unsigned int mask = xas->xa_sibs;
+
+ /* XXX: no support for splitting really large entries yet */
+ if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
+ goto nomem;
+ if (xas->xa_shift + XA_CHUNK_SHIFT > order)
+ return;
+
+ do {
+ unsigned int i;
+ void *sibling;
+ struct xa_node *node;
+
+ node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ if (!node)
+ goto nomem;
+ node->array = xas->xa;
+ for (i = 0; i < XA_CHUNK_SIZE; i++) {
+ if ((i & mask) == 0) {
+ RCU_INIT_POINTER(node->slots[i], entry);
+ sibling = xa_mk_sibling(0);
+ } else {
+ RCU_INIT_POINTER(node->slots[i], sibling);
+ }
+ }
+ RCU_INIT_POINTER(node->parent, xas->xa_alloc);
+ xas->xa_alloc = node;
+ } while (sibs-- > 0);
+
+ return;
+nomem:
+ xas_destroy(xas);
+ xas_set_err(xas, -ENOMEM);
+}
+EXPORT_SYMBOL_GPL(xas_split_alloc);
+
+/**
+ * xas_split() - Split a multi-index entry into smaller entries.
+ * @xas: XArray operation state.
+ * @entry: New entry to store in the array.
+ * @order: New entry order.
+ *
+ * The value in the entry is copied to all the replacement entries.
+ *
+ * Context: Any context. The caller should hold the xa_lock.
+ */
+void xas_split(struct xa_state *xas, void *entry, unsigned int order)
+{
+ unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+ unsigned int offset, marks;
+ struct xa_node *node;
+ void *curr = xas_load(xas);
+ int values = 0;
+
+ node = xas->xa_node;
+ if (xas_top(node))
+ return;
+
+ marks = node_get_marks(node, xas->xa_offset);
+
+ offset = xas->xa_offset + sibs;
+ do {
+ if (xas->xa_shift < node->shift) {
+ struct xa_node *child = xas->xa_alloc;
+
+ xas->xa_alloc = rcu_dereference_raw(child->parent);
+ child->shift = node->shift - XA_CHUNK_SHIFT;
+ child->offset = offset;
+ child->count = XA_CHUNK_SIZE;
+ child->nr_values = xa_is_value(entry) ?
+ XA_CHUNK_SIZE : 0;
+ RCU_INIT_POINTER(child->parent, node);
+ node_set_marks(node, offset, child, marks);
+ rcu_assign_pointer(node->slots[offset],
+ xa_mk_node(child));
+ if (xa_is_value(curr))
+ values--;
+ } else {
+ unsigned int canon = offset - xas->xa_sibs;
+
+ node_set_marks(node, canon, NULL, marks);
+ rcu_assign_pointer(node->slots[canon], entry);
+ while (offset > canon)
+ rcu_assign_pointer(node->slots[offset--],
+ xa_mk_sibling(canon));
+ values += (xa_is_value(entry) - xa_is_value(curr)) *
+ (xas->xa_sibs + 1);
+ }
+ } while (offset-- > xas->xa_offset);
+
+ node->nr_values += values;
+}
+EXPORT_SYMBOL_GPL(xas_split);
+#endif
+
/**
* xas_pause() - Pause a walk to drop a lock.
* @xas: XArray operation state.
@@ -1407,7 +1557,7 @@ EXPORT_SYMBOL(__xa_store);
* @gfp: Memory allocation flags.
*
* After this function returns, loads from this index will return @entry.
- * Storing into an existing multislot entry updates the entry of every index.
+ * Storing into an existing multi-index entry updates the entry of every index.
* The marks associated with @index are unaffected unless @entry is %NULL.
*
* Context: Any context. Takes and releases the xa_lock.
@@ -1549,7 +1699,7 @@ static void xas_set_range(struct xa_state *xas, unsigned long first,
*
* After this function returns, loads from any index between @first and @last,
* inclusive will return @entry.
- * Storing into an existing multislot entry updates the entry of every index.
+ * Storing into an existing multi-index entry updates the entry of every index.
* The marks associated with @index are unaffected unless @entry is %NULL.
*
* Context: Process context. Takes and releases the xa_lock. May sleep
@@ -1592,6 +1742,46 @@ unlock:
return xas_result(&xas, NULL);
}
EXPORT_SYMBOL(xa_store_range);
+
+/**
+ * xa_get_order() - Get the order of an entry.
+ * @xa: XArray.
+ * @index: Index of the entry.
+ *
+ * Return: A number between 0 and 63 indicating the order of the entry.
+ */
+int xa_get_order(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ void *entry;
+ int order = 0;
+
+ rcu_read_lock();
+ entry = xas_load(&xas);
+
+ if (!entry)
+ goto unlock;
+
+ if (!xas.xa_node)
+ goto unlock;
+
+ for (;;) {
+ unsigned int slot = xas.xa_offset + (1 << order);
+
+ if (slot >= XA_CHUNK_SIZE)
+ break;
+ if (!xa_is_sibling(xas.xa_node->slots[slot]))
+ break;
+ order++;
+ }
+
+ order += xas.xa_node->shift;
+unlock:
+ rcu_read_unlock();
+
+ return order;
+}
+EXPORT_SYMBOL(xa_get_order);
#endif /* CONFIG_XARRAY_MULTI */
/**
@@ -1974,6 +2164,29 @@ unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
EXPORT_SYMBOL(xa_extract);
/**
+ * xa_delete_node() - Private interface for workingset code.
+ * @node: Node to be removed from the tree.
+ * @update: Function to call to update ancestor nodes.
+ *
+ * Context: xa_lock must be held on entry and will not be released.
+ */
+void xa_delete_node(struct xa_node *node, xa_update_node_t update)
+{
+ struct xa_state xas = {
+ .xa = node->array,
+ .xa_index = (unsigned long)node->offset <<
+ (node->shift + XA_CHUNK_SHIFT),
+ .xa_shift = node->shift + XA_CHUNK_SHIFT,
+ .xa_offset = node->offset,
+ .xa_node = xa_parent_locked(node->array, node),
+ .xa_update = update,
+ };
+
+ xas_store(&xas, NULL);
+}
+EXPORT_SYMBOL_GPL(xa_delete_node); /* For the benefit of the test suite */
+
+/**
* xa_destroy() - Free all internal data structures.
* @xa: XArray.
*
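A hedged usage sketch for the new xa_get_order() (CONFIG_XARRAY_MULTI only); xa_store_order() is the test-suite helper used by check_get_order() above, and the indices are arbitrary:

#include <linux/xarray.h>

static void get_order_example(struct xarray *xa)
{
        /* One order-2 entry covering indices 16..19. */
        xa_store_order(xa, 16, 2, xa_mk_value(1), GFP_KERNEL);

        WARN_ON(xa_get_order(xa, 16) != 2);     /* any index in 16..19 reports 2 */
        WARN_ON(xa_get_order(xa, 19) != 2);
        WARN_ON(xa_get_order(xa, 20) != 0);     /* nothing stored there */

        xa_erase(xa, 16);                       /* erasing any covered index drops the entry */
}
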
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index 65a1aad8c223..ca2603abee08 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -1043,7 +1043,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
s->lzma2.sequence = SEQ_LZMA_PREPARE;
- /* fall through */
+ fallthrough;
case SEQ_LZMA_PREPARE:
if (s->lzma2.compressed < RC_INIT_BYTES)
@@ -1055,7 +1055,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
s->lzma2.compressed -= RC_INIT_BYTES;
s->lzma2.sequence = SEQ_LZMA_RUN;
- /* fall through */
+ fallthrough;
case SEQ_LZMA_RUN:
/*
diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c
index 32ab2a08b7cb..fea86deaaa01 100644
--- a/lib/xz/xz_dec_stream.c
+++ b/lib/xz/xz_dec_stream.c
@@ -583,7 +583,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
if (ret != XZ_OK)
return ret;
- /* fall through */
+ fallthrough;
case SEQ_BLOCK_START:
/* We need one byte of input to continue. */
@@ -608,7 +608,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->temp.pos = 0;
s->sequence = SEQ_BLOCK_HEADER;
- /* fall through */
+ fallthrough;
case SEQ_BLOCK_HEADER:
if (!fill_temp(s, b))
@@ -620,7 +620,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->sequence = SEQ_BLOCK_UNCOMPRESS;
- /* fall through */
+ fallthrough;
case SEQ_BLOCK_UNCOMPRESS:
ret = dec_block(s, b);
@@ -629,7 +629,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->sequence = SEQ_BLOCK_PADDING;
- /* fall through */
+ fallthrough;
case SEQ_BLOCK_PADDING:
/*
@@ -651,7 +651,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->sequence = SEQ_BLOCK_CHECK;
- /* fall through */
+ fallthrough;
case SEQ_BLOCK_CHECK:
if (s->check_type == XZ_CHECK_CRC32) {
@@ -675,7 +675,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->sequence = SEQ_INDEX_PADDING;
- /* fall through */
+ fallthrough;
case SEQ_INDEX_PADDING:
while ((s->index.size + (b->in_pos - s->in_start))
@@ -699,7 +699,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->sequence = SEQ_INDEX_CRC32;
- /* fall through */
+ fallthrough;
case SEQ_INDEX_CRC32:
ret = crc32_validate(s, b);
@@ -709,7 +709,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->temp.size = STREAM_HEADER_SIZE;
s->sequence = SEQ_STREAM_FOOTER;
- /* fall through */
+ fallthrough;
case SEQ_STREAM_FOOTER:
if (!fill_temp(s, b))
diff --git a/lib/zlib_dfltcc/Makefile b/lib/zlib_dfltcc/Makefile
index 8e4d5afbbb10..66e1c96387c4 100644
--- a/lib/zlib_dfltcc/Makefile
+++ b/lib/zlib_dfltcc/Makefile
@@ -8,4 +8,4 @@
obj-$(CONFIG_ZLIB_DFLTCC) += zlib_dfltcc.o
-zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o dfltcc_syms.o
+zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o
diff --git a/lib/zlib_dfltcc/dfltcc.c b/lib/zlib_dfltcc/dfltcc.c
index c30de430b30c..782f76e9d4da 100644
--- a/lib/zlib_dfltcc/dfltcc.c
+++ b/lib/zlib_dfltcc/dfltcc.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: Zlib
/* dfltcc.c - SystemZ DEFLATE CONVERSION CALL support. */
-#include <linux/zutil.h>
+#include <linux/export.h>
+#include <linux/module.h>
#include "dfltcc_util.h"
#include "dfltcc.h"
@@ -53,3 +54,6 @@ void dfltcc_reset(
dfltcc_state->dht_threshold = DFLTCC_DHT_MIN_SAMPLE_SIZE;
dfltcc_state->param.ribm = DFLTCC_RIBM;
}
+EXPORT_SYMBOL(dfltcc_reset);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/zlib_dfltcc/dfltcc_deflate.c b/lib/zlib_dfltcc/dfltcc_deflate.c
index 00c185101c6d..6c946e8532ee 100644
--- a/lib/zlib_dfltcc/dfltcc_deflate.c
+++ b/lib/zlib_dfltcc/dfltcc_deflate.c
@@ -4,6 +4,7 @@
#include "dfltcc_util.h"
#include "dfltcc.h"
#include <asm/setup.h>
+#include <linux/export.h>
#include <linux/zutil.h>
/*
@@ -34,6 +35,7 @@ int dfltcc_can_deflate(
return 1;
}
+EXPORT_SYMBOL(dfltcc_can_deflate);
static void dfltcc_gdht(
z_streamp strm
@@ -277,3 +279,4 @@ again:
goto again; /* deflate() must use all input or all output */
return 1;
}
+EXPORT_SYMBOL(dfltcc_deflate);
diff --git a/lib/zlib_dfltcc/dfltcc_inflate.c b/lib/zlib_dfltcc/dfltcc_inflate.c
index aa9ef23474df..fb60b5a6a1cb 100644
--- a/lib/zlib_dfltcc/dfltcc_inflate.c
+++ b/lib/zlib_dfltcc/dfltcc_inflate.c
@@ -4,6 +4,7 @@
#include "dfltcc_util.h"
#include "dfltcc.h"
#include <asm/setup.h>
+#include <linux/export.h>
#include <linux/zutil.h>
/*
@@ -29,6 +30,7 @@ int dfltcc_can_inflate(
return is_bit_set(dfltcc_state->af.fns, DFLTCC_XPND) &&
is_bit_set(dfltcc_state->af.fmts, DFLTCC_FMT0);
}
+EXPORT_SYMBOL(dfltcc_can_inflate);
static int dfltcc_was_inflate_used(
z_streamp strm
@@ -123,7 +125,7 @@ dfltcc_inflate_action dfltcc_inflate(
param->ho = (state->write - state->whave) & ((1 << HB_BITS) - 1);
if (param->hl)
param->nt = 0; /* Honor history for the first block */
- param->cv = state->flags ? REVERSE(state->check) : state->check;
+ param->cv = state->check;
/* Inflate */
do {
@@ -136,7 +138,7 @@ dfltcc_inflate_action dfltcc_inflate(
state->bits = param->sbb;
state->whave = param->hl;
state->write = (param->ho + param->hl) & ((1 << HB_BITS) - 1);
- state->check = state->flags ? REVERSE(param->cv) : param->cv;
+ state->check = param->cv;
if (cc == DFLTCC_CC_OP2_CORRUPT && param->oesc != 0) {
/* Report an error if stream is corrupted */
state->mode = BAD;
@@ -147,3 +149,4 @@ dfltcc_inflate_action dfltcc_inflate(
return (cc == DFLTCC_CC_OP1_TOO_SHORT || cc == DFLTCC_CC_OP2_TOO_SHORT) ?
DFLTCC_INFLATE_BREAK : DFLTCC_INFLATE_CONTINUE;
}
+EXPORT_SYMBOL(dfltcc_inflate);
diff --git a/lib/zlib_dfltcc/dfltcc_syms.c b/lib/zlib_dfltcc/dfltcc_syms.c
deleted file mode 100644
index 6f23481804c1..000000000000
--- a/lib/zlib_dfltcc/dfltcc_syms.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/lib/zlib_dfltcc/dfltcc_syms.c
- *
- * Exported symbols for the s390 zlib dfltcc support.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/zlib.h>
-#include "dfltcc.h"
-
-EXPORT_SYMBOL(dfltcc_can_deflate);
-EXPORT_SYMBOL(dfltcc_deflate);
-EXPORT_SYMBOL(dfltcc_reset);
-MODULE_LICENSE("GPL");
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index 67cc9b08ae9d..ee39b5eb71f7 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -396,7 +396,7 @@ int zlib_inflate(z_streamp strm, int flush)
strm->adler = state->check = REVERSE(hold);
INITBITS();
state->mode = DICT;
- /* fall through */
+ fallthrough;
case DICT:
if (state->havedict == 0) {
RESTORE();
@@ -404,10 +404,10 @@ int zlib_inflate(z_streamp strm, int flush)
}
strm->adler = state->check = zlib_adler32(0L, NULL, 0);
state->mode = TYPE;
- /* fall through */
+ fallthrough;
case TYPE:
if (flush == Z_BLOCK) goto inf_leave;
- /* fall through */
+ fallthrough;
case TYPEDO:
INFLATE_TYPEDO_HOOK(strm, flush);
if (state->last) {
@@ -446,7 +446,7 @@ int zlib_inflate(z_streamp strm, int flush)
state->length = (unsigned)hold & 0xffff;
INITBITS();
state->mode = COPY;
- /* fall through */
+ fallthrough;
case COPY:
copy = state->length;
if (copy) {
@@ -480,7 +480,7 @@ int zlib_inflate(z_streamp strm, int flush)
#endif
state->have = 0;
state->mode = LENLENS;
- /* fall through */
+ fallthrough;
case LENLENS:
while (state->have < state->ncode) {
NEEDBITS(3);
@@ -501,7 +501,7 @@ int zlib_inflate(z_streamp strm, int flush)
}
state->have = 0;
state->mode = CODELENS;
- /* fall through */
+ fallthrough;
case CODELENS:
while (state->have < state->nlen + state->ndist) {
for (;;) {
@@ -575,7 +575,7 @@ int zlib_inflate(z_streamp strm, int flush)
break;
}
state->mode = LEN;
- /* fall through */
+ fallthrough;
case LEN:
if (have >= 6 && left >= 258) {
RESTORE();
@@ -615,7 +615,7 @@ int zlib_inflate(z_streamp strm, int flush)
}
state->extra = (unsigned)(this.op) & 15;
state->mode = LENEXT;
- /* fall through */
+ fallthrough;
case LENEXT:
if (state->extra) {
NEEDBITS(state->extra);
@@ -623,7 +623,7 @@ int zlib_inflate(z_streamp strm, int flush)
DROPBITS(state->extra);
}
state->mode = DIST;
- /* fall through */
+ fallthrough;
case DIST:
for (;;) {
this = state->distcode[BITS(state->distbits)];
@@ -649,7 +649,7 @@ int zlib_inflate(z_streamp strm, int flush)
state->offset = (unsigned)this.val;
state->extra = (unsigned)(this.op) & 15;
state->mode = DISTEXT;
- /* fall through */
+ fallthrough;
case DISTEXT:
if (state->extra) {
NEEDBITS(state->extra);
@@ -669,7 +669,7 @@ int zlib_inflate(z_streamp strm, int flush)
break;
}
state->mode = MATCH;
- /* fall through */
+ fallthrough;
case MATCH:
if (left == 0) goto inf_leave;
copy = out - left;
@@ -720,7 +720,7 @@ int zlib_inflate(z_streamp strm, int flush)
INITBITS();
}
state->mode = DONE;
- /* fall through */
+ fallthrough;
case DONE:
ret = Z_STREAM_END;
goto inf_leave;
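
The comment-to-keyword conversions in this file (and in the zstd hunks below) rely on the kernel's fallthrough pseudo-keyword. A minimal sketch of the resulting shape, assuming the macro is visible via the usual kernel headers (include/linux/compiler_attributes.h in current trees, where it expands to the compiler's fallthrough attribute when available); the function and case values here are illustrative only.

	static int classify(int mode)
	{
		int ret = 0;

		switch (mode) {
		case 0:
			ret += 1;
			fallthrough;	/* replaces the "fall through" comment; satisfies -Wimplicit-fallthrough */
		case 1:
			ret += 2;
			break;
		default:
			ret = -1;
		}
		return ret;
	}

Unlike the old comment form, the annotation is seen by the compiler, so intentional fall-through is distinguished from a missing break even when comments are stripped or reformatted.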
diff --git a/lib/zstd/bitstream.h b/lib/zstd/bitstream.h
index 3a49784d5c61..5d6343c1a909 100644
--- a/lib/zstd/bitstream.h
+++ b/lib/zstd/bitstream.h
@@ -259,16 +259,17 @@ ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, s
bitD->bitContainer = *(const BYTE *)(bitD->start);
switch (srcSize) {
case 7: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[6]) << (sizeof(bitD->bitContainer) * 8 - 16);
- /* fall through */
+ fallthrough;
case 6: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[5]) << (sizeof(bitD->bitContainer) * 8 - 24);
- /* fall through */
+ fallthrough;
case 5: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[4]) << (sizeof(bitD->bitContainer) * 8 - 32);
- /* fall through */
+ fallthrough;
case 4: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[3]) << 24;
- /* fall through */
+ fallthrough;
case 3: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[2]) << 16;
- /* fall through */
+ fallthrough;
case 2: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[1]) << 8;
+ fallthrough;
default:;
}
{
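
The BIT_initDStream() hunk above, like the HUF_compress1X_usingCTable() hunk further down, uses a descending-case switch to consume a tail of leftover bytes, with every case falling into the next one. A hypothetical sketch of that shape with the new annotations in place (again assuming the kernel fallthrough macro):

	/* Accumulate 1-3 trailing bytes; each case falls through so the
	 * lower-numbered bytes are also consumed.
	 */
	static unsigned long load_tail(const unsigned char *p, unsigned int n)
	{
		unsigned long v = 0;

		switch (n & 3) {
		case 3:
			v |= (unsigned long)p[2] << 16;
			fallthrough;
		case 2:
			v |= (unsigned long)p[1] << 8;
			fallthrough;
		case 1:
			v |= (unsigned long)p[0];
			fallthrough;	/* annotated even into the empty default, as in the hunks */
		case 0:
		default:
			break;
		}
		return v;
	}
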
diff --git a/lib/zstd/compress.c b/lib/zstd/compress.c
index 5e0b67003e55..b080264ed3ad 100644
--- a/lib/zstd/compress.c
+++ b/lib/zstd/compress.c
@@ -3182,7 +3182,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t *
zcs->outBuffFlushedSize = 0;
zcs->stage = zcss_flush; /* pass-through to flush stage */
}
- /* fall through */
+ fallthrough;
case zcss_flush: {
size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
diff --git a/lib/zstd/decompress.c b/lib/zstd/decompress.c
index db6761ea4deb..66cd487a326a 100644
--- a/lib/zstd/decompress.c
+++ b/lib/zstd/decompress.c
@@ -442,7 +442,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx *dctx, const void *src, size_t srcSize
case set_repeat:
if (dctx->litEntropy == 0)
return ERROR(dictionary_corrupted);
- /* fall through */
+ fallthrough;
case set_compressed:
if (srcSize < 5)
return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
@@ -1768,7 +1768,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, c
return 0;
}
dctx->expected = 0; /* not necessary to copy more */
- /* fall through */
+ fallthrough;
case ZSTDds_decodeFrameHeader:
memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
@@ -2309,7 +2309,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB
switch (zds->stage) {
case zdss_init:
ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */
- /* fall through */
+ fallthrough;
case zdss_loadHeader: {
size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize);
@@ -2376,7 +2376,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB
}
zds->stage = zdss_read;
}
- /* fall through */
+ fallthrough;
case zdss_read: {
size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
@@ -2405,7 +2405,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB
zds->stage = zdss_load;
/* pass-through */
}
- /* fall through */
+ fallthrough;
case zdss_load: {
size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
@@ -2438,7 +2438,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB
/* pass-through */
}
}
- /* fall through */
+ fallthrough;
case zdss_flush: {
size_t const toFlushSize = zds->outEnd - zds->outStart;
diff --git a/lib/zstd/huf_compress.c b/lib/zstd/huf_compress.c
index e727812d12aa..fd32838c185f 100644
--- a/lib/zstd/huf_compress.c
+++ b/lib/zstd/huf_compress.c
@@ -556,10 +556,11 @@ size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, si
n = srcSize & ~3; /* join to mod 4 */
switch (srcSize & 3) {
case 3: HUF_encodeSymbol(&bitC, ip[n + 2], CTable); HUF_FLUSHBITS_2(&bitC);
- /* fall through */
+ fallthrough;
case 2: HUF_encodeSymbol(&bitC, ip[n + 1], CTable); HUF_FLUSHBITS_1(&bitC);
- /* fall through */
+ fallthrough;
case 1: HUF_encodeSymbol(&bitC, ip[n + 0], CTable); HUF_FLUSHBITS(&bitC);
+ fallthrough;
case 0:
default:;
}