Diffstat (limited to 'lib')
 lib/Kconfig.debug                        |  26
 lib/alloc_tag.c                          |   6
 lib/bug.c                                |  22
 lib/idr.c                                |  67
 lib/interval_tree.c                      |  12
 lib/interval_tree_test.c                 | 237
 lib/maple_tree.c                         |  10
 lib/min_heap.c                           |   4
 lib/plist.c                              |  12
 lib/rbtree_test.c                        |  30
 lib/sg_split.c                           |   2
 lib/sort.c                               | 110
 lib/stackdepot.c                         |  10
 lib/test_hmm.c                           |  72
 lib/test_ida.c                           |  70
 lib/test_xarray.c                        |  52
 lib/tests/module/gen_test_kallsyms.sh    |   2
 lib/vdso/datastore.c                     |   3
 lib/vsprintf.c                           |  12
 lib/xarray.c                             | 157
 lib/zlib_deflate/deflate.c               |   6
 21 files changed, 763 insertions(+), 159 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0ffd5526bd46..df9587aa5c5e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1280,6 +1280,17 @@ config BOOTPARAM_HUNG_TASK_PANIC
Say N if unsure.
+config DETECT_HUNG_TASK_BLOCKER
+ bool "Dump Hung Tasks Blocker"
+ depends on DETECT_HUNG_TASK
+ depends on !PREEMPT_RT
+ default y
+ help
+ Say Y here to dump the stack trace of the blocker task that holds
+ the mutex a "hung task" is waiting on.
+ This adds a small overhead, but shows the suspicious task and its
+ call trace when the hang comes from waiting on a mutex.
+
config WQ_WATCHDOG
bool "Detect Workqueue Stalls"
depends on DEBUG_KERNEL
@@ -2502,13 +2513,20 @@ config TEST_IDA
tristate "Perform selftest on IDA functions"
config TEST_MISC_MINOR
- tristate "Basic misc minor Kunit test" if !KUNIT_ALL_TESTS
+ tristate "miscdevice KUnit test" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
help
- Kunit test for the misc minor.
- It tests misc minor functions for dynamic and misc dynamic minor.
- This include misc_xxx functions
+ KUnit test for the miscdevice API, especially its behavior with
+ respect to static and dynamic minor numbers.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (https://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
If unsure, say N.
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 19b45617bdcf..1d893e313614 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -174,7 +174,7 @@ void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
if (!mem_alloc_profiling_enabled())
return;
- tag = pgalloc_tag_get(&folio->page);
+ tag = __pgalloc_tag_get(&folio->page);
if (!tag)
return;
@@ -200,10 +200,10 @@ void pgalloc_tag_swap(struct folio *new, struct folio *old)
if (!mem_alloc_profiling_enabled())
return;
- tag_old = pgalloc_tag_get(&old->page);
+ tag_old = __pgalloc_tag_get(&old->page);
if (!tag_old)
return;
- tag_new = pgalloc_tag_get(&new->page);
+ tag_new = __pgalloc_tag_get(&new->page);
if (!tag_new)
return;
diff --git a/lib/bug.c b/lib/bug.c
index e0ff21989990..b1f07459c2ee 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -66,23 +66,19 @@ static LIST_HEAD(module_bug_list);
static struct bug_entry *module_find_bug(unsigned long bugaddr)
{
+ struct bug_entry *bug;
struct module *mod;
- struct bug_entry *bug = NULL;
- rcu_read_lock_sched();
+ guard(rcu)();
list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
unsigned i;
bug = mod->bug_table;
for (i = 0; i < mod->num_bugs; ++i, ++bug)
if (bugaddr == bug_addr(bug))
- goto out;
+ return bug;
}
- bug = NULL;
-out:
- rcu_read_unlock_sched();
-
- return bug;
+ return NULL;
}
void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
@@ -235,11 +231,11 @@ void generic_bug_clear_once(void)
#ifdef CONFIG_MODULES
struct module *mod;
- rcu_read_lock_sched();
- list_for_each_entry_rcu(mod, &module_bug_list, bug_list)
- clear_once_table(mod->bug_table,
- mod->bug_table + mod->num_bugs);
- rcu_read_unlock_sched();
+ scoped_guard(rcu) {
+ list_for_each_entry_rcu(mod, &module_bug_list, bug_list)
+ clear_once_table(mod->bug_table,
+ mod->bug_table + mod->num_bugs);
+ }
#endif
clear_once_table(__start___bug_table, __stop___bug_table);
diff --git a/lib/idr.c b/lib/idr.c
index da36054c3ca0..e2adc457abb4 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -477,6 +477,73 @@ nospc:
EXPORT_SYMBOL(ida_alloc_range);
/**
+ * ida_find_first_range - Get the lowest used ID.
+ * @ida: IDA handle.
+ * @min: Lowest ID to get.
+ * @max: Highest ID to get.
+ *
+ * Get the lowest used ID between @min and @max, inclusive. The returned
+ * ID will not exceed %INT_MAX, even if @max is larger.
+ *
+ * Context: Any context. Takes and releases the xa_lock.
+ * Return: The lowest used ID, or errno if no used ID is found.
+ */
+int ida_find_first_range(struct ida *ida, unsigned int min, unsigned int max)
+{
+ unsigned long index = min / IDA_BITMAP_BITS;
+ unsigned int offset = min % IDA_BITMAP_BITS;
+ unsigned long *addr, size, bit;
+ unsigned long tmp = 0;
+ unsigned long flags;
+ void *entry;
+ int ret;
+
+ if ((int)min < 0)
+ return -EINVAL;
+ if ((int)max < 0)
+ max = INT_MAX;
+
+ xa_lock_irqsave(&ida->xa, flags);
+
+ entry = xa_find(&ida->xa, &index, max / IDA_BITMAP_BITS, XA_PRESENT);
+ if (!entry) {
+ ret = -ENOENT;
+ goto err_unlock;
+ }
+
+ if (index > min / IDA_BITMAP_BITS)
+ offset = 0;
+ if (index * IDA_BITMAP_BITS + offset > max) {
+ ret = -ENOENT;
+ goto err_unlock;
+ }
+
+ if (xa_is_value(entry)) {
+ tmp = xa_to_value(entry);
+ addr = &tmp;
+ size = BITS_PER_XA_VALUE;
+ } else {
+ addr = ((struct ida_bitmap *)entry)->bitmap;
+ size = IDA_BITMAP_BITS;
+ }
+
+ bit = find_next_bit(addr, size, offset);
+
+ xa_unlock_irqrestore(&ida->xa, flags);
+
+ if (bit == size ||
+ index * IDA_BITMAP_BITS + bit > max)
+ return -ENOENT;
+
+ return index * IDA_BITMAP_BITS + bit;
+
+err_unlock:
+ xa_unlock_irqrestore(&ida->xa, flags);
+ return ret;
+}
+EXPORT_SYMBOL(ida_find_first_range);
+
+/**
* ida_free() - Release an allocated ID.
* @ida: IDA handle.
* @id: Previously allocated ID.
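A minimal usage sketch of the new lookup helper (illustrative only, not part of the patch; it assumes nothing beyond the ida_find_first_range() semantics documented above, and the IDA and function names are hypothetical):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_lowest_id(void)
{
	int id, first;

	id = ida_alloc_min(&example_ida, 42, GFP_KERNEL);	/* allocates ID 42 */
	if (id < 0)
		return id;

	/* Lowest used ID in [0, INT_MAX]; here that is 42. */
	first = ida_find_first_range(&example_ida, 0, INT_MAX);

	ida_free(&example_ida, id);

	/* first is -ENOENT if the range held no used ID, -EINVAL on a bad @min. */
	return first < 0 ? first : 0;
}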
diff --git a/lib/interval_tree.c b/lib/interval_tree.c
index 3412737ff365..324766e9bf63 100644
--- a/lib/interval_tree.c
+++ b/lib/interval_tree.c
@@ -20,9 +20,15 @@ EXPORT_SYMBOL_GPL(interval_tree_iter_next);
/*
* Roll nodes[1] into nodes[0] by advancing nodes[1] to the end of a contiguous
* span of nodes. This makes nodes[0]->last the end of that contiguous used span
- * indexes that started at the original nodes[1]->start. nodes[1] is now the
- * first node starting the next used span. A hole span is between nodes[0]->last
- * and nodes[1]->start. nodes[1] must be !NULL.
+ * of indexes that started at the original nodes[1]->start.
+ *
+ * If there is an interior hole, nodes[1] is now the first node starting the
+ * next used span. A hole span is between nodes[0]->last and nodes[1]->start.
+ *
+ * If there is a trailing hole, nodes[1] is now NULL. A hole span is between
+ * nodes[0]->last and last_index.
+ *
+ * If the contiguous used range spans to last_index, nodes[1] is set to NULL.
*/
static void
interval_tree_span_iter_next_gap(struct interval_tree_span_iter *state)
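For context, a sketch of how the span iterator whose gap semantics are clarified above is typically walked (illustrative only, not taken from this patch; it relies on interval_tree_for_each_span() and the is_hole/start_*/last_* fields also exercised by the new test below, and requires CONFIG_INTERVAL_TREE_SPAN_ITER):

#include <linux/interval_tree.h>
#include <linux/printk.h>

static void dump_spans(struct rb_root_cached *root,
		       unsigned long first, unsigned long last)
{
	struct interval_tree_span_iter span;

	interval_tree_for_each_span(&span, root, first, last) {
		if (span.is_hole)
			pr_info("hole: %lu-%lu\n",
				span.start_hole, span.last_hole);
		else
			pr_info("used: %lu-%lu\n",
				span.start_used, span.last_used);
	}
}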
diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c
index 837064b83a6c..5fd62656f42e 100644
--- a/lib/interval_tree_test.c
+++ b/lib/interval_tree_test.c
@@ -5,6 +5,8 @@
#include <linux/prandom.h>
#include <linux/slab.h>
#include <asm/timex.h>
+#include <linux/bitmap.h>
+#include <linux/maple_tree.h>
#define __param(type, name, init, msg) \
static type name = init; \
@@ -19,6 +21,7 @@ __param(int, search_loops, 1000, "Number of iterations searching the tree");
__param(bool, search_all, false, "Searches will iterate all nodes in the tree");
__param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint");
+__param(ullong, seed, 3141592653589793238ULL, "Random seed");
static struct rb_root_cached root = RB_ROOT_CACHED;
static struct interval_tree_node *nodes = NULL;
@@ -59,26 +62,13 @@ static void init(void)
queries[i] = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
}
-static int interval_tree_test_init(void)
+static int basic_check(void)
{
int i, j;
- unsigned long results;
cycles_t time1, time2, time;
- nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
- GFP_KERNEL);
- if (!nodes)
- return -ENOMEM;
-
- queries = kmalloc_array(nsearches, sizeof(int), GFP_KERNEL);
- if (!queries) {
- kfree(nodes);
- return -ENOMEM;
- }
-
printk(KERN_ALERT "interval tree insert/remove");
- prandom_seed_state(&rnd, 3141592653589793238ULL);
init();
time1 = get_cycles();
@@ -96,8 +86,19 @@ static int interval_tree_test_init(void)
time = div_u64(time, perf_loops);
printk(" -> %llu cycles\n", (unsigned long long)time);
+ return 0;
+}
+
+static int search_check(void)
+{
+ int i, j;
+ unsigned long results;
+ cycles_t time1, time2, time;
+
printk(KERN_ALERT "interval tree search");
+ init();
+
for (j = 0; j < nnodes; j++)
interval_tree_insert(nodes + j, &root);
@@ -120,6 +121,214 @@ static int interval_tree_test_init(void)
printk(" -> %llu cycles (%lu results)\n",
(unsigned long long)time, results);
+ for (j = 0; j < nnodes; j++)
+ interval_tree_remove(nodes + j, &root);
+
+ return 0;
+}
+
+static int intersection_range_check(void)
+{
+ int i, j, k;
+ unsigned long start, last;
+ struct interval_tree_node *node;
+ unsigned long *intxn1;
+ unsigned long *intxn2;
+
+ printk(KERN_ALERT "interval tree iteration\n");
+
+ intxn1 = bitmap_alloc(nnodes, GFP_KERNEL);
+ if (!intxn1) {
+ WARN_ON_ONCE("Failed to allocate intxn1\n");
+ return -ENOMEM;
+ }
+
+ intxn2 = bitmap_alloc(nnodes, GFP_KERNEL);
+ if (!intxn2) {
+ WARN_ON_ONCE("Failed to allocate intxn2\n");
+ bitmap_free(intxn1);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < search_loops; i++) {
+ /* Initialize interval tree for each round */
+ init();
+ for (j = 0; j < nnodes; j++)
+ interval_tree_insert(nodes + j, &root);
+
+ /* Let's try nsearches different ranges */
+ for (k = 0; k < nsearches; k++) {
+ /* Try whole range once */
+ if (!k) {
+ start = 0UL;
+ last = ULONG_MAX;
+ } else {
+ last = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
+ start = (prandom_u32_state(&rnd) >> 4) % last;
+ }
+
+ /* Walk nodes to mark intersection nodes */
+ bitmap_zero(intxn1, nnodes);
+ for (j = 0; j < nnodes; j++) {
+ node = nodes + j;
+
+ if (start <= node->last && last >= node->start)
+ bitmap_set(intxn1, j, 1);
+ }
+
+ /* Iterate tree to clear intersection nodes */
+ bitmap_zero(intxn2, nnodes);
+ for (node = interval_tree_iter_first(&root, start, last); node;
+ node = interval_tree_iter_next(node, start, last))
+ bitmap_set(intxn2, node - nodes, 1);
+
+ WARN_ON_ONCE(!bitmap_equal(intxn1, intxn2, nnodes));
+ }
+
+ for (j = 0; j < nnodes; j++)
+ interval_tree_remove(nodes + j, &root);
+ }
+
+ bitmap_free(intxn1);
+ bitmap_free(intxn2);
+ return 0;
+}
+
+#ifdef CONFIG_INTERVAL_TREE_SPAN_ITER
+/*
+ * Helper function to get span of current position from maple tree point of
+ * view.
+ */
+static void mas_cur_span(struct ma_state *mas, struct interval_tree_span_iter *state)
+{
+ unsigned long cur_start;
+ unsigned long cur_last;
+ int is_hole;
+
+ if (mas->status == ma_overflow)
+ return;
+
+ /* walk to current position */
+ state->is_hole = mas_walk(mas) ? 0 : 1;
+
+ cur_start = mas->index < state->first_index ?
+ state->first_index : mas->index;
+
+ /* extend the span over any following ranges of the same kind */
+ do {
+
+ cur_last = mas->last > state->last_index ?
+ state->last_index : mas->last;
+
+ is_hole = mas_next_range(mas, state->last_index) ? 0 : 1;
+
+ } while (mas->status != ma_overflow && is_hole == state->is_hole);
+
+ if (state->is_hole) {
+ state->start_hole = cur_start;
+ state->last_hole = cur_last;
+ } else {
+ state->start_used = cur_start;
+ state->last_used = cur_last;
+ }
+
+ /* advance position for next round */
+ if (mas->status != ma_overflow)
+ mas_set(mas, cur_last + 1);
+}
+
+static int span_iteration_check(void)
+{
+ int i, j, k;
+ unsigned long start, last;
+ struct interval_tree_span_iter span, mas_span;
+
+ DEFINE_MTREE(tree);
+
+ MA_STATE(mas, &tree, 0, 0);
+
+ printk(KERN_ALERT "interval tree span iteration\n");
+
+ for (i = 0; i < search_loops; i++) {
+ /* Initialize interval tree for each round */
+ init();
+ for (j = 0; j < nnodes; j++)
+ interval_tree_insert(nodes + j, &root);
+
+ /* Put all the range into maple tree */
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ mt_set_in_rcu(&tree);
+
+ for (j = 0; j < nnodes; j++)
+ WARN_ON_ONCE(mtree_store_range(&tree, nodes[j].start,
+ nodes[j].last, nodes + j, GFP_KERNEL));
+
+ /* Let's try nsearches different ranges */
+ for (k = 0; k < nsearches; k++) {
+ /* Try whole range once */
+ if (!k) {
+ start = 0UL;
+ last = ULONG_MAX;
+ } else {
+ last = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
+ start = (prandom_u32_state(&rnd) >> 4) % last;
+ }
+
+ mas_span.first_index = start;
+ mas_span.last_index = last;
+ mas_span.is_hole = -1;
+ mas_set(&mas, start);
+
+ interval_tree_for_each_span(&span, &root, start, last) {
+ mas_cur_span(&mas, &mas_span);
+
+ WARN_ON_ONCE(span.is_hole != mas_span.is_hole);
+
+ if (span.is_hole) {
+ WARN_ON_ONCE(span.start_hole != mas_span.start_hole);
+ WARN_ON_ONCE(span.last_hole != mas_span.last_hole);
+ } else {
+ WARN_ON_ONCE(span.start_used != mas_span.start_used);
+ WARN_ON_ONCE(span.last_used != mas_span.last_used);
+ }
+ }
+
+ }
+
+ WARN_ON_ONCE(mas.status != ma_overflow);
+
+ /* Cleanup maple tree for each round */
+ mtree_destroy(&tree);
+ /* Cleanup interval tree for each round */
+ for (j = 0; j < nnodes; j++)
+ interval_tree_remove(nodes + j, &root);
+ }
+ return 0;
+}
+#else
+static inline int span_iteration_check(void) { return 0; }
+#endif
+
+static int interval_tree_test_init(void)
+{
+ nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
+ GFP_KERNEL);
+ if (!nodes)
+ return -ENOMEM;
+
+ queries = kmalloc_array(nsearches, sizeof(int), GFP_KERNEL);
+ if (!queries) {
+ kfree(nodes);
+ return -ENOMEM;
+ }
+
+ prandom_seed_state(&rnd, seed);
+
+ basic_check();
+ search_check();
+ intersection_range_check();
+ span_iteration_check();
+
kfree(queries);
kfree(nodes);
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index f7153ade1be5..d0bea23fa4bc 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -584,13 +584,10 @@ static __always_inline bool ma_dead_node(const struct maple_node *node)
*/
static __always_inline bool mte_dead_node(const struct maple_enode *enode)
{
- struct maple_node *parent, *node;
+ struct maple_node *node;
node = mte_to_node(enode);
- /* Do not reorder reads from the node prior to the parent check */
- smp_rmb();
- parent = mte_parent(enode);
- return (parent == node);
+ return ma_dead_node(node);
}
/*
@@ -1245,7 +1242,6 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
if (mas->mas_flags & MA_STATE_PREALLOC) {
if (allocated)
return;
- BUG_ON(!allocated);
WARN_ON(!allocated);
}
@@ -1353,7 +1349,7 @@ static void mas_node_count(struct ma_state *mas, int count)
* mas_start() - Sets up maple state for operations.
* @mas: The maple state.
*
- * If mas->status == mas_start, then set the min, max and depth to
+ * If mas->status == ma_start, then set the min, max and depth to
* defaults.
*
* Return:
diff --git a/lib/min_heap.c b/lib/min_heap.c
index 4485372ff3b1..96f01a4c5fb6 100644
--- a/lib/min_heap.c
+++ b/lib/min_heap.c
@@ -2,7 +2,7 @@
#include <linux/export.h>
#include <linux/min_heap.h>
-void __min_heap_init(min_heap_char *heap, void *data, int size)
+void __min_heap_init(min_heap_char *heap, void *data, size_t size)
{
__min_heap_init_inline(heap, data, size);
}
@@ -20,7 +20,7 @@ bool __min_heap_full(min_heap_char *heap)
}
EXPORT_SYMBOL(__min_heap_full);
-void __min_heap_sift_down(min_heap_char *heap, int pos, size_t elem_size,
+void __min_heap_sift_down(min_heap_char *heap, size_t pos, size_t elem_size,
const struct min_heap_callbacks *func, void *args)
{
__min_heap_sift_down_inline(heap, pos, elem_size, func, args);
diff --git a/lib/plist.c b/lib/plist.c
index c6bce1226874..330febb4bd7d 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -171,12 +171,24 @@ void plist_requeue(struct plist_node *node, struct plist_head *head)
plist_del(node, head);
+ /*
+ * After plist_del(), iter is the replacement of the node. If the node
+ * was on a prio_list, take a shortcut to find node_next instead of looping.
+ */
+ if (!list_empty(&iter->prio_list)) {
+ iter = list_entry(iter->prio_list.next, struct plist_node,
+ prio_list);
+ node_next = &iter->node_list;
+ goto queue;
+ }
+
plist_for_each_continue(iter, head) {
if (node->prio != iter->prio) {
node_next = &iter->node_list;
break;
}
}
+queue:
list_add_tail(&node->node_list, node_next);
plist_check_head(head);
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 8655a76d29a1..690cede46ac2 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -14,6 +14,7 @@
__param(int, nnodes, 100, "Number of nodes in the rb-tree");
__param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree");
__param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");
+__param(ullong, seed, 3141592653589793238ULL, "Random seed");
struct test_node {
u32 key;
@@ -239,19 +240,14 @@ static void check_augmented(int nr_nodes)
}
}
-static int __init rbtree_test_init(void)
+static int basic_check(void)
{
int i, j;
cycles_t time1, time2, time;
struct rb_node *node;
- nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
- if (!nodes)
- return -ENOMEM;
-
printk(KERN_ALERT "rbtree testing");
- prandom_seed_state(&rnd, 3141592653589793238ULL);
init();
time1 = get_cycles();
@@ -343,6 +339,14 @@ static int __init rbtree_test_init(void)
check(0);
}
+ return 0;
+}
+
+static int augmented_check(void)
+{
+ int i, j;
+ cycles_t time1, time2, time;
+
printk(KERN_ALERT "augmented rbtree testing");
init();
@@ -390,6 +394,20 @@ static int __init rbtree_test_init(void)
check_augmented(0);
}
+ return 0;
+}
+
+static int __init rbtree_test_init(void)
+{
+ nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
+ if (!nodes)
+ return -ENOMEM;
+
+ prandom_seed_state(&rnd, seed);
+
+ basic_check();
+ augmented_check();
+
kfree(nodes);
return -EAGAIN; /* Fail will directly unload the module */
diff --git a/lib/sg_split.c b/lib/sg_split.c
index 60a0babebf2e..0f89aab5c671 100644
--- a/lib/sg_split.c
+++ b/lib/sg_split.c
@@ -88,8 +88,6 @@ static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
if (!j) {
out_sg->offset += split->skip_sg0;
out_sg->length -= split->skip_sg0;
- } else {
- out_sg->offset = 0;
}
sg_dma_address(out_sg) = 0;
sg_dma_len(out_sg) = 0;
diff --git a/lib/sort.c b/lib/sort.c
index 8e73dc55476b..52363995ccc5 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -186,36 +186,13 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size)
return i / 2;
}
-/**
- * sort_r - sort an array of elements
- * @base: pointer to data to sort
- * @num: number of elements
- * @size: size of each element
- * @cmp_func: pointer to comparison function
- * @swap_func: pointer to swap function or NULL
- * @priv: third argument passed to comparison function
- *
- * This function does a heapsort on the given array. You may provide
- * a swap_func function if you need to do something more than a memory
- * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
- * avoids a slow retpoline and so is significantly faster.
- *
- * The comparison function must adhere to specific mathematical
- * properties to ensure correct and stable sorting:
- * - Antisymmetry: cmp_func(a, b) must return the opposite sign of
- * cmp_func(b, a).
- * - Transitivity: if cmp_func(a, b) <= 0 and cmp_func(b, c) <= 0, then
- * cmp_func(a, c) <= 0.
- *
- * Sorting time is O(n log n) both on average and worst-case. While
- * quicksort is slightly faster on average, it suffers from exploitable
- * O(n*n) worst-case behavior and extra memory requirements that make
- * it less suitable for kernel use.
- */
-void sort_r(void *base, size_t num, size_t size,
- cmp_r_func_t cmp_func,
- swap_r_func_t swap_func,
- const void *priv)
+#include <linux/sched.h>
+
+static void __sort_r(void *base, size_t num, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv,
+ bool may_schedule)
{
/* pre-scale counters for performance */
size_t n = num * size, a = (num/2) * size;
@@ -286,6 +263,9 @@ void sort_r(void *base, size_t num, size_t size,
b = parent(b, lsbit, size);
do_swap(base + b, base + c, size, swap_func, priv);
}
+
+ if (may_schedule)
+ cond_resched();
}
n -= size;
@@ -293,8 +273,63 @@ void sort_r(void *base, size_t num, size_t size,
if (n == size * 2 && do_cmp(base, base + size, cmp_func, priv) > 0)
do_swap(base, base + size, size, swap_func, priv);
}
+
+/**
+ * sort_r - sort an array of elements
+ * @base: pointer to data to sort
+ * @num: number of elements
+ * @size: size of each element
+ * @cmp_func: pointer to comparison function
+ * @swap_func: pointer to swap function or NULL
+ * @priv: third argument passed to comparison function
+ *
+ * This function does a heapsort on the given array. You may provide
+ * a swap_func function if you need to do something more than a memory
+ * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
+ * avoids a slow retpoline and so is significantly faster.
+ *
+ * The comparison function must adhere to specific mathematical
+ * properties to ensure correct and stable sorting:
+ * - Antisymmetry: cmp_func(a, b) must return the opposite sign of
+ * cmp_func(b, a).
+ * - Transitivity: if cmp_func(a, b) <= 0 and cmp_func(b, c) <= 0, then
+ * cmp_func(a, c) <= 0.
+ *
+ * Sorting time is O(n log n) both on average and worst-case. While
+ * quicksort is slightly faster on average, it suffers from exploitable
+ * O(n*n) worst-case behavior and extra memory requirements that make
+ * it less suitable for kernel use.
+ */
+void sort_r(void *base, size_t num, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv)
+{
+ __sort_r(base, num, size, cmp_func, swap_func, priv, false);
+}
EXPORT_SYMBOL(sort_r);
+/**
+ * sort_r_nonatomic - sort an array of elements, with cond_resched
+ * @base: pointer to data to sort
+ * @num: number of elements
+ * @size: size of each element
+ * @cmp_func: pointer to comparison function
+ * @swap_func: pointer to swap function or NULL
+ * @priv: third argument passed to comparison function
+ *
+ * Same as sort_r, but preferred for larger arrays as it does a periodic
+ * cond_resched().
+ */
+void sort_r_nonatomic(void *base, size_t num, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv)
+{
+ __sort_r(base, num, size, cmp_func, swap_func, priv, true);
+}
+EXPORT_SYMBOL(sort_r_nonatomic);
+
void sort(void *base, size_t num, size_t size,
cmp_func_t cmp_func,
swap_func_t swap_func)
@@ -304,6 +339,19 @@ void sort(void *base, size_t num, size_t size,
.swap = swap_func,
};
- return sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
+ return __sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w, false);
}
EXPORT_SYMBOL(sort);
+
+void sort_nonatomic(void *base, size_t num, size_t size,
+ cmp_func_t cmp_func,
+ swap_func_t swap_func)
+{
+ struct wrapper w = {
+ .cmp = cmp_func,
+ .swap = swap_func,
+ };
+
+ return __sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w, true);
+}
+EXPORT_SYMBOL(sort_nonatomic);
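A usage sketch of the new variant (illustrative, not part of the patch): sorting a large array from process context so the heapsort can reschedule between sift-down passes. The array and comparator names are hypothetical.

#include <linux/sort.h>
#include <linux/types.h>

static int cmp_u32(const void *a, const void *b)
{
	u32 x = *(const u32 *)a, y = *(const u32 *)b;

	return x < y ? -1 : (x > y ? 1 : 0);
}

static void sort_big_array(u32 *vals, size_t n)
{
	/* NULL swap_func selects the fast built-in swap. */
	sort_nonatomic(vals, n, sizeof(*vals), cmp_u32, NULL);
}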
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 245d5b416699..73d7b50924ef 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -591,7 +591,8 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
depot_stack_handle_t handle = 0;
struct page *page = NULL;
void *prealloc = NULL;
- bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC;
+ bool allow_spin = gfpflags_allow_spinning(alloc_flags);
+ bool can_alloc = (depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC) && allow_spin;
unsigned long flags;
u32 hash;
@@ -630,7 +631,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
prealloc = page_address(page);
}
- if (in_nmi()) {
+ if (in_nmi() || !allow_spin) {
/* We can never allocate in NMI context. */
WARN_ON_ONCE(can_alloc);
/* Best effort; bail if we fail to take the lock. */
@@ -671,7 +672,10 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
exit:
if (prealloc) {
/* Stack depot didn't use this memory, free it. */
- free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
+ if (!allow_spin)
+ free_pages_nolock(virt_to_page(prealloc), DEPOT_POOL_ORDER);
+ else
+ free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
}
if (found)
handle = found->handle.handle;
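For context, a sketch of a caller of stack_depot_save_flags() (illustrative, not from this patch): with the change above, pool allocation is suppressed not only in NMI context but also whenever alloc_flags does not permit spinning.

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t save_current_stack(gfp_t alloc_flags)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* CAN_ALLOC is honoured only if alloc_flags allows spinning. */
	return stack_depot_save_flags(entries, nr, alloc_flags,
				      STACK_DEPOT_FLAG_CAN_ALLOC);
}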
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 056f2e411d7b..5b144bc5c4ec 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -195,7 +195,8 @@ static int dmirror_fops_release(struct inode *inode, struct file *filp)
static struct dmirror_chunk *dmirror_page_to_chunk(struct page *page)
{
- return container_of(page->pgmap, struct dmirror_chunk, pagemap);
+ return container_of(page_pgmap(page), struct dmirror_chunk,
+ pagemap);
}
static struct dmirror_device *dmirror_page_to_device(struct page *page)
@@ -706,34 +707,23 @@ static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
return 0;
}
-static int dmirror_atomic_map(unsigned long start, unsigned long end,
- struct page **pages, struct dmirror *dmirror)
+static int dmirror_atomic_map(unsigned long addr, struct page *page,
+ struct dmirror *dmirror)
{
- unsigned long pfn, mapped = 0;
- int i;
+ void *entry;
/* Map the migrated pages into the device's page tables. */
mutex_lock(&dmirror->mutex);
- for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) {
- void *entry;
-
- if (!pages[i])
- continue;
-
- entry = pages[i];
- entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC);
- entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
- if (xa_is_err(entry)) {
- mutex_unlock(&dmirror->mutex);
- return xa_err(entry);
- }
-
- mapped++;
+ entry = xa_tag_pointer(page, DPT_XA_TAG_ATOMIC);
+ entry = xa_store(&dmirror->pt, addr >> PAGE_SHIFT, entry, GFP_ATOMIC);
+ if (xa_is_err(entry)) {
+ mutex_unlock(&dmirror->mutex);
+ return xa_err(entry);
}
mutex_unlock(&dmirror->mutex);
- return mapped;
+ return 0;
}
static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
@@ -780,10 +770,8 @@ static int dmirror_exclusive(struct dmirror *dmirror,
unsigned long start, end, addr;
unsigned long size = cmd->npages << PAGE_SHIFT;
struct mm_struct *mm = dmirror->notifier.mm;
- struct page *pages[64];
struct dmirror_bounce bounce;
- unsigned long next;
- int ret;
+ int ret = 0;
start = cmd->addr;
end = start + size;
@@ -795,36 +783,26 @@ static int dmirror_exclusive(struct dmirror *dmirror,
return -EINVAL;
mmap_read_lock(mm);
- for (addr = start; addr < end; addr = next) {
- unsigned long mapped = 0;
- int i;
-
- next = min(end, addr + (ARRAY_SIZE(pages) << PAGE_SHIFT));
+ for (addr = start; !ret && addr < end; addr += PAGE_SIZE) {
+ struct folio *folio;
+ struct page *page;
- ret = make_device_exclusive_range(mm, addr, next, pages, NULL);
- /*
- * Do dmirror_atomic_map() iff all pages are marked for
- * exclusive access to avoid accessing uninitialized
- * fields of pages.
- */
- if (ret == (next - addr) >> PAGE_SHIFT)
- mapped = dmirror_atomic_map(addr, next, pages, dmirror);
- for (i = 0; i < ret; i++) {
- if (pages[i]) {
- unlock_page(pages[i]);
- put_page(pages[i]);
- }
+ page = make_device_exclusive(mm, addr, NULL, &folio);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ break;
}
- if (addr + (mapped << PAGE_SHIFT) < next) {
- mmap_read_unlock(mm);
- mmput(mm);
- return -EBUSY;
- }
+ ret = dmirror_atomic_map(addr, page, dmirror);
+ folio_unlock(folio);
+ folio_put(folio);
}
mmap_read_unlock(mm);
mmput(mm);
+ if (ret)
+ return ret;
+
/* Return the migrated data for verification. */
ret = dmirror_bounce_init(&bounce, start, size);
if (ret)
diff --git a/lib/test_ida.c b/lib/test_ida.c
index c80155a1956d..63078f8dc13f 100644
--- a/lib/test_ida.c
+++ b/lib/test_ida.c
@@ -189,6 +189,75 @@ static void ida_check_bad_free(struct ida *ida)
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
+/*
+ * Check ida_find_first_range() and variants.
+ */
+static void ida_check_find_first(struct ida *ida)
+{
+ /* IDA is empty; none of the IDs below should exist */
+ IDA_BUG_ON(ida, ida_exists(ida, 0));
+ IDA_BUG_ON(ida, ida_exists(ida, 3));
+ IDA_BUG_ON(ida, ida_exists(ida, 63));
+ IDA_BUG_ON(ida, ida_exists(ida, 1023));
+ IDA_BUG_ON(ida, ida_exists(ida, (1 << 20) - 1));
+
+ /* IDA contains a single value entry */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 3, GFP_KERNEL) != 3);
+ IDA_BUG_ON(ida, ida_exists(ida, 0));
+ IDA_BUG_ON(ida, !ida_exists(ida, 3));
+ IDA_BUG_ON(ida, ida_exists(ida, 63));
+ IDA_BUG_ON(ida, ida_exists(ida, 1023));
+ IDA_BUG_ON(ida, ida_exists(ida, (1 << 20) - 1));
+
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 63, GFP_KERNEL) != 63);
+ IDA_BUG_ON(ida, ida_exists(ida, 0));
+ IDA_BUG_ON(ida, !ida_exists(ida, 3));
+ IDA_BUG_ON(ida, !ida_exists(ida, 63));
+ IDA_BUG_ON(ida, ida_exists(ida, 1023));
+ IDA_BUG_ON(ida, ida_exists(ida, (1 << 20) - 1));
+
+ /* IDA contains a single bitmap */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 1023, GFP_KERNEL) != 1023);
+ IDA_BUG_ON(ida, ida_exists(ida, 0));
+ IDA_BUG_ON(ida, !ida_exists(ida, 3));
+ IDA_BUG_ON(ida, !ida_exists(ida, 63));
+ IDA_BUG_ON(ida, !ida_exists(ida, 1023));
+ IDA_BUG_ON(ida, ida_exists(ida, (1 << 20) - 1));
+
+ /* IDA contains a tree */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, (1 << 20) - 1, GFP_KERNEL) != (1 << 20) - 1);
+ IDA_BUG_ON(ida, ida_exists(ida, 0));
+ IDA_BUG_ON(ida, !ida_exists(ida, 3));
+ IDA_BUG_ON(ida, !ida_exists(ida, 63));
+ IDA_BUG_ON(ida, !ida_exists(ida, 1023));
+ IDA_BUG_ON(ida, !ida_exists(ida, (1 << 20) - 1));
+
+ /* Now try to find first */
+ IDA_BUG_ON(ida, ida_find_first(ida) != 3);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, -1, 2) != -EINVAL);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 0, 2) != -ENOENT); // no used ID
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 0, 3) != 3);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 1, 3) != 3);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 3, 3) != 3);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 2, 4) != 3);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 4, 3) != -ENOENT); // min > max, fail
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 4, 60) != -ENOENT); // no used ID
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 4, 64) != 63);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 63, 63) != 63);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 64, 1026) != 1023);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 1023, 1023) != 1023);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 1023, (1 << 20) - 1) != 1023);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 1024, (1 << 20) - 1) != (1 << 20) - 1);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, (1 << 20), INT_MAX) != -ENOENT);
+
+ ida_free(ida, 3);
+ ida_free(ida, 63);
+ ida_free(ida, 1023);
+ ida_free(ida, (1 << 20) - 1);
+
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
static DEFINE_IDA(ida);
static int ida_checks(void)
@@ -202,6 +271,7 @@ static int ida_checks(void)
ida_check_max(&ida);
ida_check_conv(&ida);
ida_check_bad_free(&ida);
+ ida_check_find_first(&ida);
printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
return (tests_run != tests_passed) ? 0 : -EINVAL;
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 0e865bab4a10..080a39d22e73 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1858,6 +1858,54 @@ static void check_split_1(struct xarray *xa, unsigned long index,
xa_destroy(xa);
}
+static void check_split_2(struct xarray *xa, unsigned long index,
+ unsigned int order, unsigned int new_order)
+{
+ XA_STATE_ORDER(xas, xa, index, new_order);
+ unsigned int i, found;
+ void *entry;
+
+ xa_store_order(xa, index, order, xa, GFP_KERNEL);
+ xa_set_mark(xa, index, XA_MARK_1);
+
+ /* allocate a node for xas_try_split() */
+ xas_set_err(&xas, -ENOMEM);
+ XA_BUG_ON(xa, !xas_nomem(&xas, GFP_KERNEL));
+
+ xas_lock(&xas);
+ xas_try_split(&xas, xa, order);
+ if (((new_order / XA_CHUNK_SHIFT) < (order / XA_CHUNK_SHIFT)) &&
+ new_order < order - 1) {
+ XA_BUG_ON(xa, !xas_error(&xas) || xas_error(&xas) != -EINVAL);
+ xas_unlock(&xas);
+ goto out;
+ }
+ for (i = 0; i < (1 << order); i += (1 << new_order))
+ __xa_store(xa, index + i, xa_mk_index(index + i), 0);
+ xas_unlock(&xas);
+
+ for (i = 0; i < (1 << order); i++) {
+ unsigned int val = index + (i & ~((1 << new_order) - 1));
+ XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
+ }
+
+ xa_set_mark(xa, index, XA_MARK_0);
+ XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
+
+ xas_set_order(&xas, index, 0);
+ found = 0;
+ rcu_read_lock();
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) {
+ found++;
+ XA_BUG_ON(xa, xa_is_internal(entry));
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, found != 1 << (order - new_order));
+out:
+ xas_destroy(&xas);
+ xa_destroy(xa);
+}
+
static noinline void check_split(struct xarray *xa)
{
unsigned int order, new_order;
@@ -1869,6 +1917,10 @@ static noinline void check_split(struct xarray *xa)
check_split_1(xa, 0, order, new_order);
check_split_1(xa, 1UL << order, order, new_order);
check_split_1(xa, 3UL << order, order, new_order);
+
+ check_split_2(xa, 0, order, new_order);
+ check_split_2(xa, 1UL << order, order, new_order);
+ check_split_2(xa, 3UL << order, order, new_order);
}
}
}
diff --git a/lib/tests/module/gen_test_kallsyms.sh b/lib/tests/module/gen_test_kallsyms.sh
index 561dcac0f359..31fe4ed63de8 100755
--- a/lib/tests/module/gen_test_kallsyms.sh
+++ b/lib/tests/module/gen_test_kallsyms.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
TARGET=$(basename $1)
DIR=lib/tests/module
diff --git a/lib/vdso/datastore.c b/lib/vdso/datastore.c
index c715e217ec65..3693c6caf2c4 100644
--- a/lib/vdso/datastore.c
+++ b/lib/vdso/datastore.c
@@ -99,7 +99,8 @@ const struct vm_special_mapping vdso_vvar_mapping = {
struct vm_area_struct *vdso_install_vvar_mapping(struct mm_struct *mm, unsigned long addr)
{
return _install_special_mapping(mm, addr, VDSO_NR_PAGES * PAGE_SIZE,
- VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP | VM_PFNMAP,
+ VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP |
+ VM_PFNMAP | VM_SEALED_SYSMAP,
&vdso_vvar_mapping);
}
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 734bd70c8b9b..01699852f30c 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1699,8 +1699,12 @@ char *escaped_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
return buf;
}
+#pragma GCC diagnostic push
+#ifndef __clang__
+#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
+#endif
static char *va_format(char *buf, char *end, struct va_format *va_fmt,
- struct printf_spec spec, const char *fmt)
+ struct printf_spec spec)
{
va_list va;
@@ -1713,6 +1717,7 @@ static char *va_format(char *buf, char *end, struct va_format *va_fmt,
return buf;
}
+#pragma GCC diagnostic pop
static noinline_for_stack
char *uuid_string(char *buf, char *end, const u8 *addr,
@@ -2291,9 +2296,6 @@ int __init no_hash_pointers_enable(char *str)
}
early_param("no_hash_pointers", no_hash_pointers_enable);
-/* Used for Rust formatting ('%pA'). */
-char *rust_fmt_argument(char *buf, char *end, void *ptr);
-
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
@@ -2469,7 +2471,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'U':
return uuid_string(buf, end, ptr, spec, fmt);
case 'V':
- return va_format(buf, end, ptr, spec, fmt);
+ return va_format(buf, end, ptr, spec);
case 'K':
return restricted_pointer(buf, end, ptr, spec);
case 'N':
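For context (not part of the patch), the standard %pV pattern this code path serves; the fmt parameter dropped above was unused because the nested format string travels inside struct va_format:

#include <linux/kernel.h>
#include <linux/printk.h>

static __printf(1, 2) void example_log(const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_INFO "example: %pV\n", &vaf);
	va_end(args);
}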
diff --git a/lib/xarray.c b/lib/xarray.c
index 116e9286c64e..9644b18af18d 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -278,6 +278,7 @@ void xas_destroy(struct xa_state *xas)
xas->xa_alloc = node = next;
}
}
+EXPORT_SYMBOL_GPL(xas_destroy);
/**
* xas_nomem() - Allocate memory if needed.
@@ -1007,6 +1008,26 @@ static void node_set_marks(struct xa_node *node, unsigned int offset,
}
}
+static void __xas_init_node_for_split(struct xa_state *xas,
+ struct xa_node *node, void *entry)
+{
+ unsigned int i;
+ void *sibling = NULL;
+ unsigned int mask = xas->xa_sibs;
+
+ if (!node)
+ return;
+ node->array = xas->xa;
+ for (i = 0; i < XA_CHUNK_SIZE; i++) {
+ if ((i & mask) == 0) {
+ RCU_INIT_POINTER(node->slots[i], entry);
+ sibling = xa_mk_sibling(i);
+ } else {
+ RCU_INIT_POINTER(node->slots[i], sibling);
+ }
+ }
+}
+
/**
* xas_split_alloc() - Allocate memory for splitting an entry.
* @xas: XArray operation state.
@@ -1025,7 +1046,6 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
gfp_t gfp)
{
unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
- unsigned int mask = xas->xa_sibs;
/* XXX: no support for splitting really large entries yet */
if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT <= order))
@@ -1034,22 +1054,13 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
return;
do {
- unsigned int i;
- void *sibling = NULL;
struct xa_node *node;
node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
if (!node)
goto nomem;
- node->array = xas->xa;
- for (i = 0; i < XA_CHUNK_SIZE; i++) {
- if ((i & mask) == 0) {
- RCU_INIT_POINTER(node->slots[i], entry);
- sibling = xa_mk_sibling(i);
- } else {
- RCU_INIT_POINTER(node->slots[i], sibling);
- }
- }
+
+ __xas_init_node_for_split(xas, node, entry);
RCU_INIT_POINTER(node->parent, xas->xa_alloc);
xas->xa_alloc = node;
} while (sibs-- > 0);
@@ -1122,6 +1133,128 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
xas_update(xas, node);
}
EXPORT_SYMBOL_GPL(xas_split);
+
+/**
+ * xas_try_split_min_order() - Minimal split order xas_try_split() can accept
+ * @order: Current entry order.
+ *
+ * xas_try_split() can split a multi-index entry to an order smaller than
+ * @order - 1 if no new xa_node is needed. This function provides the
+ * minimal order xas_try_split() supports.
+ *
+ * Return: the minimal order xas_try_split() supports
+ *
+ * Context: Any context.
+ *
+ */
+unsigned int xas_try_split_min_order(unsigned int order)
+{
+ if (order % XA_CHUNK_SHIFT == 0)
+ return order == 0 ? 0 : order - 1;
+
+ return order - (order % XA_CHUNK_SHIFT);
+}
+EXPORT_SYMBOL_GPL(xas_try_split_min_order);
+
+/**
+ * xas_try_split() - Try to split a multi-index entry.
+ * @xas: XArray operation state.
+ * @entry: New entry to store in the array.
+ * @order: Current entry order.
+ *
+ * The size of the new entries is set in @xas. The value in @entry is
+ * copied to all the replacement entries. If and only if one new xa_node is
+ * needed, the function will use GFP_NOWAIT to get one if xas->xa_alloc is
+ * NULL. If more than one new xa_node is needed, the function returns -EINVAL.
+ *
+ * NOTE: use xas_try_split_min_order() to get the next split order instead of
+ * @order - 1 if you want to minimize xas_try_split() calls.
+ *
+ * Context: Any context. The caller should hold the xa_lock.
+ */
+void xas_try_split(struct xa_state *xas, void *entry, unsigned int order)
+{
+ unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+ unsigned int offset, marks;
+ struct xa_node *node;
+ void *curr = xas_load(xas);
+ int values = 0;
+ gfp_t gfp = GFP_NOWAIT;
+
+ node = xas->xa_node;
+ if (xas_top(node))
+ return;
+
+ if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
+ gfp |= __GFP_ACCOUNT;
+
+ marks = node_get_marks(node, xas->xa_offset);
+
+ offset = xas->xa_offset + sibs;
+
+ if (xas->xa_shift < node->shift) {
+ struct xa_node *child = xas->xa_alloc;
+ unsigned int expected_sibs =
+ (1 << ((order - 1) % XA_CHUNK_SHIFT)) - 1;
+
+ /*
+ * No support for splitting sibling entries
+ * (horizontally) or for a cascade split (vertically),
+ * since either would require two or more new xa_nodes,
+ * and if one xa_node allocation fails it is hard to
+ * free the prior allocations.
+ */
+ if (sibs || xas->xa_sibs != expected_sibs) {
+ xas_destroy(xas);
+ xas_set_err(xas, -EINVAL);
+ return;
+ }
+
+ if (!child) {
+ child = kmem_cache_alloc_lru(radix_tree_node_cachep,
+ xas->xa_lru, gfp);
+ if (!child) {
+ xas_destroy(xas);
+ xas_set_err(xas, -ENOMEM);
+ return;
+ }
+ RCU_INIT_POINTER(child->parent, xas->xa_alloc);
+ }
+ __xas_init_node_for_split(xas, child, entry);
+
+ xas->xa_alloc = rcu_dereference_raw(child->parent);
+ child->shift = node->shift - XA_CHUNK_SHIFT;
+ child->offset = offset;
+ child->count = XA_CHUNK_SIZE;
+ child->nr_values = xa_is_value(entry) ?
+ XA_CHUNK_SIZE : 0;
+ RCU_INIT_POINTER(child->parent, node);
+ node_set_marks(node, offset, child, xas->xa_sibs,
+ marks);
+ rcu_assign_pointer(node->slots[offset],
+ xa_mk_node(child));
+ if (xa_is_value(curr))
+ values--;
+ xas_update(xas, child);
+
+ } else {
+ do {
+ unsigned int canon = offset - xas->xa_sibs;
+
+ node_set_marks(node, canon, NULL, 0, marks);
+ rcu_assign_pointer(node->slots[canon], entry);
+ while (offset > canon)
+ rcu_assign_pointer(node->slots[offset--],
+ xa_mk_sibling(canon));
+ values += (xa_is_value(entry) - xa_is_value(curr)) *
+ (xas->xa_sibs + 1);
+ } while (offset-- > xas->xa_offset);
+ }
+
+ node->nr_values += values;
+ xas_update(xas, node);
+}
+EXPORT_SYMBOL_GPL(xas_try_split);
#endif
/**
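A usage sketch of the new helpers (illustrative only, modelled on check_split_2() in the test update above rather than taken from the patch): repeatedly splitting an order-9 entry in the largest steps xas_try_split() accepts, relying on its internal single-node GFP_NOWAIT allocation.

#include <linux/xarray.h>

static int split_down(struct xarray *xa, unsigned long index, void *entry)
{
	unsigned int order = 9;	/* example starting order */

	while (order) {
		unsigned int new_order = xas_try_split_min_order(order);
		XA_STATE_ORDER(xas, xa, index, new_order);

		xas_lock(&xas);
		/* May allocate at most one node with GFP_NOWAIT internally. */
		xas_try_split(&xas, entry, order);
		xas_unlock(&xas);
		if (xas_error(&xas))
			return xas_error(&xas);

		order = new_order;
	}
	return 0;
}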
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index 3a1d8d34182e..8fb2a3e17c0e 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -151,9 +151,6 @@ static const config configuration_table[10] = {
* meaning.
*/
-#define EQUAL 0
-/* result of memcmp for equal strings */
-
/* ===========================================================================
* Update a hash value with the given input byte
* IN assertion: all calls to UPDATE_HASH are made with consecutive
@@ -713,8 +710,7 @@ static void check_match(
)
{
/* check that the match is indeed a match */
- if (memcmp((char *)s->window + match,
- (char *)s->window + start, length) != EQUAL) {
+ if (memcmp((char *)s->window + match, (char *)s->window + start, length)) {
fprintf(stderr, " start %u, match %u, length %d\n",
start, match, length);
do {