From 2655421ae69fa479df1575cb2630af9131d28939 Mon Sep 17 00:00:00 2001
From: Nicholas Piggin
Date: Fri, 3 Feb 2023 17:18:36 +1000
Subject: lazy tlb: shoot lazies, non-refcounting lazy tlb mm reference
 handling scheme

On big systems, the mm refcount can become highly contended when doing
a lot of context switching with threaded applications. The user<->idle
switch is one of the important cases. Abandoning lazy tlb entirely
slows this switching down quite a bit in the common uncontended case,
so that is not viable.

Implement a scheme where lazy tlb mm references do not contribute to
the refcount; instead they get explicitly removed when the refcount
reaches zero. The final mmdrop() sends IPIs to all CPUs in the
mm_cpumask, and they switch away from this mm to init_mm if it was
being used as the lazy tlb mm. Enabling the shoot lazies option
therefore requires that the arch ensures that mm_cpumask contains all
CPUs that could possibly be using mm. A DEBUG_VM option IPIs every CPU
in the system after this to ensure there are no references remaining
before the mm is freed.

Shootdown IPI cost could be an issue, but it has not been observed to
be a serious problem with this scheme, because short-lived processes
tend not to migrate between CPUs much, so they don't get much chance
to leave lazy tlb mm references on remote CPUs. There are many options
to reduce the IPIs if necessary, described in comments.

The near-worst case can be benchmarked with will-it-scale:

  context_switch1_threads -t $(($(nproc) / 2))

This will create nproc threads (nproc / 2 switching pairs), all
sharing the same mm, spread over all CPUs so that each CPU does
thread->idle->thread switching.

[ Rik came up with basically the same idea a few years ago, so credit
  to him for that. ]

Link: https://lore.kernel.org/linux-mm/20230118080011.2258375-1-npiggin@gmail.com/
Link: https://lore.kernel.org/all/20180728215357.3249-11-riel@surriel.com/
Link: https://lkml.kernel.org/r/20230203071837.1136453-5-npiggin@gmail.com
Signed-off-by: Nicholas Piggin
Acked-by: Linus Torvalds
Cc: Andy Lutomirski
Cc: Catalin Marinas
Cc: Christophe Leroy
Cc: Dave Hansen
Cc: Michael Ellerman
Cc: Nadav Amit
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Will Deacon
Signed-off-by: Andrew Morton
---
 lib/Kconfig.debug | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'lib')

diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 39d1d93164bd..5cd8183bb4c1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -791,6 +791,16 @@ config DEBUG_VM
 
 	  If unsure, say N.
 
+config DEBUG_VM_SHOOT_LAZIES
+	bool "Debug MMU_LAZY_TLB_SHOOTDOWN implementation"
+	depends on DEBUG_VM
+	depends on MMU_LAZY_TLB_SHOOTDOWN
+	help
+	  Enable additional IPIs that ensure lazy tlb mm references are
+	  removed before the mm is freed.
+
+	  If unsure, say N.
+
 config DEBUG_VM_MAPLE_TREE
 	bool "Debug VM maple trees"
 	depends on DEBUG_VM
--
cgit v1.2.3


From 4c85c0be3d7a9a7ffe48bfe0954eacc0ba9d3c75 Mon Sep 17 00:00:00 2001
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Date: Mon, 30 Jan 2023 13:25:13 +0900
Subject: mm, printk: introduce new format %pGt for page_type

The %pGp format is used to display the 'flags' field of a struct page.
However, some page flags (e.g. PG_buddy, see page-flags.h for more
details) are stored in the page_type field. To display human-readable
output of page_type, introduce the %pGt format.

It is important to note that the meaning of the bits is different in
page_type: if page_type is 0xffffffff, no flags are set. Setting the
PG_buddy (0x00000080) flag results in a page_type of 0xffffff7f.
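For illustration, this inverted encoding can be reproduced with a
minimal userspace sketch (the constant names here are local to the
sketch, not kernel identifiers; the values mirror page-flags.h):

  #include <stdio.h>

  #define NO_TYPE_FLAGS 0xffffffffu /* all bits set: no type flags */
  #define PG_BUDDY      0x00000080u

  int main(void)
  {
          unsigned int page_type = NO_TYPE_FLAGS;

          page_type &= ~PG_BUDDY;     /* "set" PG_buddy by clearing its bit */
          printf("%#x\n", page_type); /* prints 0xffffff7f */
          return 0;
  }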
Clearing a bit therefore means setting a flag; bits in page_type are
inverted when the type names are displayed. Only values for which
page_type_has_type() returns true are considered a page_type, to avoid
confusion with mapcount values. If it returns false, only the raw
value is displayed, not page type names.

Link: https://lkml.kernel.org/r/20230130042514.2418-3-42.hyeyoo@gmail.com
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Petr Mladek [vsprintf part]
Cc: Andy Shevchenko
Cc: David Hildenbrand
Cc: Joe Perches
Cc: John Ogness
Cc: Matthew Wilcox
Cc: Sergey Senozhatsky
Cc: Steven Rostedt (Google)
Cc: Vlastimil Babka
Signed-off-by: Andrew Morton
---
 lib/test_printf.c | 26 ++++++++++++++++++++++++++
 lib/vsprintf.c    | 21 +++++++++++++++++++++
 2 files changed, 47 insertions(+)

(limited to 'lib')

diff --git a/lib/test_printf.c b/lib/test_printf.c
index 46b4e6c414a3..7677ebccf3c3 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -642,12 +642,26 @@ page_flags_test(int section, int node, int zone, int last_cpupid,
 	test(cmp_buf, "%pGp", &flags);
 }
 
+static void __init page_type_test(unsigned int page_type, const char *name,
+				  char *cmp_buf)
+{
+	unsigned long size;
+
+	size = scnprintf(cmp_buf, BUF_SIZE, "%#x(", page_type);
+	if (page_type_has_type(page_type))
+		size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
+
+	snprintf(cmp_buf + size, BUF_SIZE - size, ")");
+	test(cmp_buf, "%pGt", &page_type);
+}
+
 static void __init
 flags(void)
 {
 	unsigned long flags;
 	char *cmp_buffer;
 	gfp_t gfp;
+	unsigned int page_type;
 
 	cmp_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
 	if (!cmp_buffer)
@@ -687,6 +701,18 @@ flags(void)
 	gfp |= __GFP_HIGH;
 	test(cmp_buffer, "%pGg", &gfp);
 
+	page_type = ~0;
+	page_type_test(page_type, "", cmp_buffer);
+
+	page_type = 10;
+	page_type_test(page_type, "", cmp_buffer);
+
+	page_type = ~PG_buddy;
+	page_type_test(page_type, "buddy", cmp_buffer);
+
+	page_type = ~(PG_table | PG_buddy);
+	page_type_test(page_type, "table|buddy", cmp_buffer);
+
 	kfree(cmp_buffer);
 }
 
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index be71a03c936a..fbe320b5e89f 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -2052,6 +2052,25 @@ char *format_page_flags(char *buf, char *end, unsigned long flags)
 	return buf;
 }
 
+static
+char *format_page_type(char *buf, char *end, unsigned int page_type)
+{
+	buf = number(buf, end, page_type, default_flag_spec);
+
+	if (buf < end)
+		*buf = '(';
+	buf++;
+
+	if (page_type_has_type(page_type))
+		buf = format_flags(buf, end, ~page_type, pagetype_names);
+
+	if (buf < end)
+		*buf = ')';
+	buf++;
+
+	return buf;
+}
+
 static noinline_for_stack
 char *flags_string(char *buf, char *end, void *flags_ptr,
 		   struct printf_spec spec, const char *fmt)
@@ -2065,6 +2084,8 @@ char *flags_string(char *buf, char *end, void *flags_ptr,
 	switch (fmt[1]) {
 	case 'p':
 		return format_page_flags(buf, end, *(unsigned long *)flags_ptr);
+	case 't':
+		return format_page_type(buf, end, *(unsigned int *)flags_ptr);
 	case 'v':
 		flags = *(unsigned long *)flags_ptr;
 		names = vmaflag_names;
--
cgit v1.2.3


From 8e00b2dffd822b34d8d1c627dc19f0743f9f5ac6 Mon Sep 17 00:00:00 2001
From: Alexander Potapenko
Date: Mon, 6 Mar 2023 12:13:21 +0100
Subject: lib/stackdepot: kmsan: mark API outputs as initialized

KMSAN does not instrument stackdepot and may treat memory allocated by
it as uninitialized. This is not a problem for KMSAN itself, because
its functions calling the stackdepot API are also not instrumented.
But other kernel features (e.g. the netdev tracker) may access the
stack depot from instrumented code, which will lead to false positives
unless we explicitly mark stackdepot outputs as initialized.
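For example, an instrumented consumer fetching a stack trace reads
memory that stackdepot wrote (a rough sketch; the caller and handle
are hypothetical, the two API calls are real):

  unsigned long *entries;
  unsigned int nr_entries;

  nr_entries = stack_depot_fetch(handle, &entries);
  if (nr_entries) /* reads 'entries', which stackdepot set */
          stack_trace_print(entries, nr_entries, 0);

Without the unpoisoning below, KMSAN could flag both the read of
'entries' and the reads of the stored frames.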
Link: https://lkml.kernel.org/r/20230306111322.205724-1-glider@google.com
Signed-off-by: Alexander Potapenko
Reported-by: syzbot
Reviewed-by: Dmitry Vyukov
Suggested-by: Dmitry Vyukov
Reviewed-by: Andrey Konovalov
Cc: Marco Elver
Signed-off-by: Andrew Morton
---
 lib/stackdepot.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'lib')

diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 036da8e295d1..2f5aa851834e 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -17,6 +17,7 @@
 #include <linux/gfp.h>
 #include <linux/jhash.h>
 #include <linux/kernel.h>
+#include <linux/kmsan.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/percpu.h>
@@ -306,6 +307,11 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 	stack->handle.extra = 0;
 	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
 	pool_offset += required_size;
+	/*
+	 * Let KMSAN know the stored stack record is initialized. This shall
+	 * prevent false positive reports if instrumented code accesses it.
+	 */
+	kmsan_unpoison_memory(stack, required_size);
 
 	return stack;
 }
@@ -465,6 +471,12 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
 	struct stack_record *stack;
 
 	*entries = NULL;
+	/*
+	 * Let KMSAN know *entries is initialized. This shall prevent false
+	 * positive reports if instrumented code accesses it.
+	 */
+	kmsan_unpoison_memory(entries, sizeof(*entries));
+
 	if (!handle)
 		return 0;
--
cgit v1.2.3


From 5c63a7c32a94a7e2fecdd6754a6ff47cd4226ee1 Mon Sep 17 00:00:00 2001
From: Danilo Krummrich
Date: Thu, 2 Mar 2023 02:10:35 +0100
Subject: maple_tree: export symbol mas_preallocate()

Fix the missing EXPORT_SYMBOL_GPL() statement for mas_preallocate().
It isn't actually used by anything yet, but mas_preallocate() is part
of the maple tree's 'Advanced API'. All other functions of this API
are exported already.

Link: https://lkml.kernel.org/r/20230302011035.4928-1-dakr@redhat.com
Signed-off-by: Danilo Krummrich
Reviewed-by: Liam R. Howlett
Signed-off-by: Andrew Morton
---
 lib/maple_tree.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'lib')

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 9e2735cbc2b4..ae37a167e25d 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -5726,6 +5726,7 @@ int mas_preallocate(struct ma_state *mas, gfp_t gfp)
 	mas_reset(mas);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(mas_preallocate);
 
 /*
  * mas_destroy() - destroy a maple state.
--
cgit v1.2.3


From 4f80818b4a58c9458dce0df7cce9abe107da445e Mon Sep 17 00:00:00 2001
From: Lorenzo Stoakes
Date: Wed, 22 Mar 2023 18:57:03 +0000
Subject: iov_iter: add copy_page_to_iter_nofault()

Provide a means to copy a page to user space from an iterator,
aborting if a page fault would occur. This supports compound pages,
but may be passed a tail page with an offset extending further into
the compound page, so we cannot pass a folio.

This allows the function to be called from atomic context, where it
_tries_ to copy to user pages if they are faulted in and aborts if
not.

The function does not use _copy_to_iter(), in order not to invoke
might_fault(); this is similar to copy_page_from_iter_atomic().

This is being added so that an iterator-based form of vread() can be
implemented while holding spinlocks.
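As a rough usage sketch (the lock and surrounding logic are
hypothetical, shown only to illustrate the intended atomic calling
context):

  size_t copied;

  spin_lock(&some_lock); /* atomic context: must not fault */
  copied = copy_page_to_iter_nofault(page, offset, len, iter);
  spin_unlock(&some_lock);

  if (copied != len) {
          /* Destination not faulted in (or partial copy):
           * fall back or retry outside the lock. */
  }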
Link: https://lkml.kernel.org/r/19734729defb0f498a76bdec1bef3ac48a3af3e8.1679511146.git.lstoakes@gmail.com
Signed-off-by: Lorenzo Stoakes
Reviewed-by: Baoquan He
Cc: Alexander Viro
Cc: David Hildenbrand
Cc: Jens Axboe
Cc: Jiri Olsa
Cc: Liu Shixin
Cc: Matthew Wilcox (Oracle)
Cc: Uladzislau Rezki (Sony)
Signed-off-by: Andrew Morton
---
 lib/iov_iter.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

(limited to 'lib')

diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 274014e4eafe..34dd6bdf2fba 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -172,6 +172,18 @@ static int copyout(void __user *to, const void *from, size_t n)
 	return n;
 }
 
+static int copyout_nofault(void __user *to, const void *from, size_t n)
+{
+	long res;
+
+	if (should_fail_usercopy())
+		return n;
+
+	res = copy_to_user_nofault(to, from, n);
+
+	return res < 0 ? n : res;
+}
+
 static int copyin(void *to, const void __user *from, size_t n)
{
 	size_t res = n;
@@ -734,6 +746,42 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 }
 EXPORT_SYMBOL(copy_page_to_iter);
 
+size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes,
+				 struct iov_iter *i)
+{
+	size_t res = 0;
+
+	if (!page_copy_sane(page, offset, bytes))
+		return 0;
+	if (WARN_ON_ONCE(i->data_source))
+		return 0;
+	if (unlikely(iov_iter_is_pipe(i)))
+		return copy_page_to_iter_pipe(page, offset, bytes, i);
+	page += offset / PAGE_SIZE; // first subpage
+	offset %= PAGE_SIZE;
+	while (1) {
+		void *kaddr = kmap_local_page(page);
+		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
+
+		iterate_and_advance(i, n, base, len, off,
+			copyout_nofault(base, kaddr + offset + off, len),
+			memcpy(base, kaddr + offset + off, len)
+		)
+		kunmap_local(kaddr);
+		res += n;
+		bytes -= n;
+		if (!bytes || !n)
+			break;
+		offset += n;
+		if (offset == PAGE_SIZE) {
+			page++;
+			offset = 0;
+		}
+	}
+	return res;
+}
+EXPORT_SYMBOL(copy_page_to_iter_nofault);
+
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
--
cgit v1.2.3


From 869cb29a61a14bbc52e7bc8b18e8810874caf320 Mon Sep 17 00:00:00 2001
From: "Uladzislau Rezki (Sony)"
Date: Thu, 30 Mar 2023 21:06:39 +0200
Subject: lib/test_vmalloc.c: add vm_map_ram()/vm_unmap_ram() test case

Add a vm_map_ram()/vm_unmap_ram() test case to our stress test-suite.

[akpm@linux-foundation.org: fix whitespace, per Lorenzo]
Link: https://lkml.kernel.org/r/20230330190639.431589-2-urezki@gmail.com
Signed-off-by: Uladzislau Rezki (Sony)
Reviewed-by: Lorenzo Stoakes
Reviewed-by: Baoquan He
Cc: Christoph Hellwig
Cc: Dave Chinner
Cc: Matthew Wilcox (Oracle)
Cc: Oleksiy Avramchenko
Signed-off-by: Andrew Morton
---
 lib/test_vmalloc.c | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

(limited to 'lib')

diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index de4ee0d50906..84c124f097b7 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -53,6 +53,7 @@ __param(int, run_test_mask, INT_MAX,
 		"\t\tid: 128, name: pcpu_alloc_test\n"
 		"\t\tid: 256, name: kvfree_rcu_1_arg_vmalloc_test\n"
 		"\t\tid: 512, name: kvfree_rcu_2_arg_vmalloc_test\n"
+		"\t\tid: 1024, name: vm_map_ram_test\n"
 		/* Add a new test case description here. */
 );
 
@@ -358,6 +359,41 @@ kvfree_rcu_2_arg_vmalloc_test(void)
 	return 0;
 }
 
+static int
+vm_map_ram_test(void)
+{
+	unsigned long nr_allocated;
+	unsigned int map_nr_pages;
+	unsigned char *v_ptr;
+	struct page **pages;
+	int i;
+
+	map_nr_pages = nr_pages > 0 ? nr_pages:1;
+	pages = kmalloc(map_nr_pages * sizeof(struct page), GFP_KERNEL);
+	if (!pages)
+		return -1;
+
+	nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages);
+	if (nr_allocated != map_nr_pages)
+		goto cleanup;
+
+	/* Run the test loop. */
+	for (i = 0; i < test_loop_count; i++) {
+		v_ptr = vm_map_ram(pages, map_nr_pages, NUMA_NO_NODE);
+		*v_ptr = 'a';
+		vm_unmap_ram(v_ptr, map_nr_pages);
+	}
+
+cleanup:
+	for (i = 0; i < nr_allocated; i++)
+		__free_page(pages[i]);
+
+	kfree(pages);
+
+	/* 0 indicates success. */
+	return nr_allocated != map_nr_pages;
+}
+
 struct test_case_desc {
 	const char *test_name;
 	int (*test_func)(void);
@@ -374,6 +410,7 @@ static struct test_case_desc test_case_array[] = {
 	{ "pcpu_alloc_test", pcpu_alloc_test },
 	{ "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test },
 	{ "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test },
+	{ "vm_map_ram_test", vm_map_ram_test },
 	/* Add a new test case here. */
 };
--
cgit v1.2.3


From 97f7e09481f312b143db53cadbdfe81abac97e73 Mon Sep 17 00:00:00 2001
From: Peng Zhang
Date: Tue, 14 Mar 2023 20:42:02 +0800
Subject: maple_tree: simplify mas_wr_node_walk()

Simplify the code of mas_wr_node_walk() without changing its
functionality, and improve its readability. Remove some special-case
checks. Instead of dynamically recording the min and max in the loop,
get the final min and max directly at the end.

Link: https://lkml.kernel.org/r/20230314124203.91572-3-zhangpeng.00@bytedance.com
Signed-off-by: Peng Zhang
Reviewed-by: Liam R. Howlett
Signed-off-by: Andrew Morton
---
 lib/maple_tree.c | 34 +++++-----------------------------
 1 file changed, 5 insertions(+), 29 deletions(-)

(limited to 'lib')

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 4a6ecdb12a92..f475bac9d914 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -2312,9 +2312,7 @@ static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
 static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
 {
 	struct ma_state *mas = wr_mas->mas;
-	unsigned char count;
-	unsigned char offset;
-	unsigned long index, min, max;
+	unsigned char count, offset;
 
 	if (unlikely(ma_is_dense(wr_mas->type))) {
 		wr_mas->r_max = wr_mas->r_min = mas->index;
@@ -2327,34 +2325,12 @@ static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
 	count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
 					       wr_mas->pivots, mas->max);
 	offset = mas->offset;
-	min = mas_safe_min(mas, wr_mas->pivots, offset);
-	if (unlikely(offset == count))
-		goto max;
-
-	max = wr_mas->pivots[offset];
-	index = mas->index;
-	if (unlikely(index <= max))
-		goto done;
-
-	if (unlikely(!max && offset))
-		goto max;
 
-	min = max + 1;
-	while (++offset < count) {
-		max = wr_mas->pivots[offset];
-		if (index <= max)
-			goto done;
-		else if (unlikely(!max))
-			break;
-
-		min = max + 1;
-	}
+	while (offset < count && mas->index > wr_mas->pivots[offset])
+		offset++;
 
-max:
-	max = mas->max;
-done:
-	wr_mas->r_max = max;
-	wr_mas->r_min = min;
+	wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
+	wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
 	wr_mas->offset_end = mas->offset = offset;
 }
--
cgit v1.2.3


From fb20e99a74f8f08c53061e0186d0c26d546dc843 Mon Sep 17 00:00:00 2001
From: Peng Zhang
Date: Tue, 11 Apr 2023 10:35:13 +0800
Subject: maple_tree: use correct variable type in sizeof

The type of the variable pointed to by pivs is unsigned long, but the
type used in sizeof is a pointer type. Change it to unsigned long.
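This class of typo is easy to avoid by sizing from the object the
pointer refers to instead of spelling out the type; as a sketch of
that alternative (not part of this patch):

  memset(pivs + tmp, 0, sizeof(*pivs) * (max_p - tmp));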
This change has no runtime effect, as sizeof(unsigned long) ==
sizeof(unsigned long *) on all architectures Linux supports.

Link: https://lkml.kernel.org/r/20230411023513.15227-1-zhangpeng.00@bytedance.com
Fixes: 54a611b60590 ("Maple Tree: add new data structure")
Signed-off-by: Peng Zhang
Reported-by: David Binderman
Reviewed-by: Liam R. Howlett
Signed-off-by: Andrew Morton
---
 lib/maple_tree.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index f475bac9d914..9172bcee94b4 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -3258,7 +3258,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end
 
 	if (tmp < max_p)
 		memset(pivs + tmp, 0,
-		       sizeof(unsigned long *) * (max_p - tmp));
+		       sizeof(unsigned long) * (max_p - tmp));
 	if (tmp < mt_slots[mt])
 		memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
--
cgit v1.2.3


From 13215e8a4bb336dac2af561d4f5c34a071810ee4 Mon Sep 17 00:00:00 2001
From: Yajun Deng
Date: Mon, 17 Apr 2023 11:52:26 +0800
Subject: lib/show_mem.c: use for_each_populated_zone() to simplify code

__show_mem() needs to iterate over all zones that have memory, so the
code can be simplified by using for_each_populated_zone().

Link: https://lkml.kernel.org/r/20230417035226.4013584-1-yajun.deng@linux.dev
Signed-off-by: Yajun Deng
Acked-by: Vlastimil Babka
Acked-by: Michal Hocko
Cc: Johannes Weiner
Signed-off-by: Andrew Morton
---
 lib/show_mem.c | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)

(limited to 'lib')

diff --git a/lib/show_mem.c b/lib/show_mem.c
index 0d7585cde2a6..1485c87be935 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -10,26 +10,19 @@
 
 void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
 {
-	pg_data_t *pgdat;
 	unsigned long total = 0, reserved = 0, highmem = 0;
+	struct zone *zone;
 
 	printk("Mem-Info:\n");
 	__show_free_areas(filter, nodemask, max_zone_idx);
 
-	for_each_online_pgdat(pgdat) {
-		int zoneid;
+	for_each_populated_zone(zone) {
 
-		for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
-			struct zone *zone = &pgdat->node_zones[zoneid];
-			if (!populated_zone(zone))
-				continue;
+		total += zone->present_pages;
+		reserved += zone->present_pages - zone_managed_pages(zone);
 
-			total += zone->present_pages;
-			reserved += zone->present_pages - zone_managed_pages(zone);
-
-			if (is_highmem_idx(zoneid))
-				highmem += zone->present_pages;
-		}
+		if (is_highmem(zone))
+			highmem += zone->present_pages;
 	}
 
 	printk("%lu pages RAM\n", total);
--
cgit v1.2.3


From 29ad6bb313487370f9dfe5441fc8982593b6384e Mon Sep 17 00:00:00 2001
From: Peng Zhang
Date: Wed, 19 Apr 2023 17:36:25 +0800
Subject: maple_tree: fix allocation in mas_sparse_area()

In the case of reverse allocation, mas->index and mas->last do not
point to the correct allocation range, which causes users to get
incorrect allocation results, so fix it. The bug is only triggered
when the tree is used in a specific way; VMA, currently the only user,
does not use it that way, but a future user might.

Also re-check whether the size is still satisfied after the lower
bound is increased; this is a corner case that was handled incorrectly
in previous versions.
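For context, a rough sketch of how a caller sees the reverse
allocation API (the tree, bounds, and size here are hypothetical, and
the maple tree lock must be held as usual for the advanced API):

  MA_STATE(mas, &tree, 0, 0);

  /* Search downward for a 0x1000-long gap starting within [min, max]. */
  if (mas_empty_area_rev(&mas, min, max, 0x1000) == 0) {
          /* mas.index .. mas.last now span the gap that was found. */
  }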
Link: https://lkml.kernel.org/r/20230419093625.99201-1-zhangpeng.00@bytedance.com
Fixes: 54a611b60590 ("Maple Tree: add new data structure")
Signed-off-by: Peng Zhang
Cc: Liam R. Howlett
Signed-off-by: Andrew Morton
---
 lib/maple_tree.c | 41 ++++++++++++++++++++---------------------
 1 file changed, 20 insertions(+), 21 deletions(-)

(limited to 'lib')

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 9172bcee94b4..110a36479dce 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -5250,25 +5250,28 @@ static inline void mas_fill_gap(struct ma_state *mas, void *entry,
  * @size: The size of the gap
  * @fwd: Searching forward or back
  */
-static inline void mas_sparse_area(struct ma_state *mas, unsigned long min,
+static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
 		unsigned long max, unsigned long size, bool fwd)
 {
-	unsigned long start = 0;
-
-	if (!unlikely(mas_is_none(mas)))
-		start++;
+	if (!unlikely(mas_is_none(mas)) && min == 0) {
+		min++;
+		/*
+		 * At this time, min is increased, we need to recheck whether
+		 * the size is satisfied.
+		 */
+		if (min > max || max - min + 1 < size)
+			return -EBUSY;
+	}
 	/* mas_is_ptr */
 
-	if (start < min)
-		start = min;
-
 	if (fwd) {
-		mas->index = start;
-		mas->last = start + size - 1;
-		return;
+		mas->index = min;
+		mas->last = min + size - 1;
+	} else {
+		mas->last = max;
+		mas->index = max - size + 1;
 	}
-
-	mas->index = max;
+	return 0;
 }
 
@@ -5297,10 +5300,8 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
 		return -EBUSY;
 
 	/* Empty set */
-	if (mas_is_none(mas) || mas_is_ptr(mas)) {
-		mas_sparse_area(mas, min, max, size, true);
-		return 0;
-	}
+	if (mas_is_none(mas) || mas_is_ptr(mas))
+		return mas_sparse_area(mas, min, max, size, true);
 
 	/* The start of the window can only be within these values */
 	mas->index = min;
@@ -5356,10 +5357,8 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
 	}
 
 	/* Empty set. */
-	if (mas_is_none(mas) || mas_is_ptr(mas)) {
-		mas_sparse_area(mas, min, max, size, false);
-		return 0;
-	}
+	if (mas_is_none(mas) || mas_is_ptr(mas))
+		return mas_sparse_area(mas, min, max, size, false);
 
 	/* The start of the window can only be within these values. */
 	mas->index = min;
--
cgit v1.2.3