author     Jason Liu <jason.hui.liu@nxp.com>    2020-03-08 18:57:18 +0800
committer  Jason Liu <jason.hui.liu@nxp.com>    2020-03-08 18:57:18 +0800
commit     335d2828a9000fab6f3895f261e3281342f51f5b (patch)
tree       6c7443842b407e98ef110de6683346455b4d0f88 /lib
parent     1d12f4912e3b630ee81f17e164309d172d8c50b5 (diff)
parent     cff670b3eb68257029e2977a6bfeac7d9b829e9a (diff)
Merge tag 'v5.4.24' into imx_5.4.y
Merge Linux stable release v5.4.24 into imx_5.4.y
* tag 'v5.4.24': (3306 commits)
Linux 5.4.24
blktrace: Protect q->blk_trace with RCU
kvm: nVMX: VMWRITE checks unsupported field before read-only field
...
Signed-off-by: Jason Liu <jason.hui.liu@nxp.com>
Conflicts:
arch/arm/boot/dts/imx6sll-evk.dts
arch/arm/boot/dts/imx7ulp.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
drivers/clk/imx/clk-composite-8m.c
drivers/gpio/gpio-mxc.c
drivers/irqchip/Kconfig
drivers/mmc/host/sdhci-of-esdhc.c
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
drivers/net/can/flexcan.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/phy/realtek.c
drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
drivers/perf/fsl_imx8_ddr_perf.c
drivers/tee/optee/shm_pool.c
drivers/usb/cdns3/gadget.c
kernel/sched/cpufreq.c
net/core/xdp.c
sound/soc/fsl/fsl_esai.c
sound/soc/fsl/fsl_sai.c
sound/soc/sof/core.c
sound/soc/sof/imx/Kconfig
sound/soc/sof/loader.c
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug        | 11
-rw-r--r--  lib/debugobjects.c       | 46
-rw-r--r--  lib/raid6/mktables.c     |  2
-rw-r--r--  lib/raid6/unroll.awk     |  2
-rw-r--r--  lib/sbitmap.c            |  2
-rw-r--r--  lib/scatterlist.c        |  2
-rw-r--r--  lib/stackdepot.c         |  8
-rw-r--r--  lib/strncpy_from_user.c  | 14
-rw-r--r--  lib/strnlen_user.c       | 14
-rw-r--r--  lib/test_kasan.c         |  1
-rw-r--r--  lib/test_xarray.c        | 78
-rw-r--r--  lib/ubsan.c              | 69
-rw-r--r--  lib/xarray.c             | 41
13 files changed, 170 insertions, 120 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 93d97f9b0157..f61d834e02fe 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -299,17 +299,6 @@ config HEADERS_INSTALL user-space program samples. It is also needed by some features such as uapi header sanity checks. -config HEADERS_CHECK - bool "Run sanity checks on uapi headers when building 'all'" - depends on HEADERS_INSTALL - help - This option will run basic sanity checks on uapi headers when - building the 'all' target, for example, ensure that they do not - attempt to include files which were not exported, etc. - - If you're making modifications to header files which are - relevant for userspace, say 'Y'. - config OPTIMIZE_INLINING def_bool y help diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 61261195f5b6..48054dbf1b51 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -132,14 +132,18 @@ static void fill_pool(void) struct debug_obj *obj; unsigned long flags; - if (likely(obj_pool_free >= debug_objects_pool_min_level)) + if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level)) return; /* * Reuse objs from the global free list; they will be reinitialized * when allocating. + * + * Both obj_nr_tofree and obj_pool_free are checked locklessly; the + * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical + * sections. */ - while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) { + while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) { raw_spin_lock_irqsave(&pool_lock, flags); /* * Recheck with the lock held as the worker thread might have @@ -148,9 +152,9 @@ static void fill_pool(void) while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) { obj = hlist_entry(obj_to_free.first, typeof(*obj), node); hlist_del(&obj->node); - obj_nr_tofree--; + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1); hlist_add_head(&obj->node, &obj_pool); - obj_pool_free++; + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); } raw_spin_unlock_irqrestore(&pool_lock, flags); } @@ -158,7 +162,7 @@ static void fill_pool(void) if (unlikely(!obj_cache)) return; - while (obj_pool_free < debug_objects_pool_min_level) { + while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) { struct debug_obj *new[ODEBUG_BATCH_SIZE]; int cnt; @@ -174,7 +178,7 @@ static void fill_pool(void) while (cnt) { hlist_add_head(&new[--cnt]->node, &obj_pool); debug_objects_allocated++; - obj_pool_free++; + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); } raw_spin_unlock_irqrestore(&pool_lock, flags); } @@ -236,7 +240,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) obj = __alloc_object(&obj_pool); if (obj) { obj_pool_used++; - obj_pool_free--; + WRITE_ONCE(obj_pool_free, obj_pool_free - 1); /* * Looking ahead, allocate one batch of debug objects and @@ -255,7 +259,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) &percpu_pool->free_objs); percpu_pool->obj_free++; obj_pool_used++; - obj_pool_free--; + WRITE_ONCE(obj_pool_free, obj_pool_free - 1); } } @@ -309,8 +313,8 @@ static void free_obj_work(struct work_struct *work) obj = hlist_entry(obj_to_free.first, typeof(*obj), node); hlist_del(&obj->node); hlist_add_head(&obj->node, &obj_pool); - obj_pool_free++; - obj_nr_tofree--; + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1); } raw_spin_unlock_irqrestore(&pool_lock, flags); return; @@ -324,7 +328,7 @@ free_objs: if (obj_nr_tofree) { hlist_move_list(&obj_to_free, &tofree); 
debug_objects_freed += obj_nr_tofree; - obj_nr_tofree = 0; + WRITE_ONCE(obj_nr_tofree, 0); } raw_spin_unlock_irqrestore(&pool_lock, flags); @@ -375,10 +379,10 @@ free_to_obj_pool: obj_pool_used--; if (work) { - obj_nr_tofree++; + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1); hlist_add_head(&obj->node, &obj_to_free); if (lookahead_count) { - obj_nr_tofree += lookahead_count; + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count); obj_pool_used -= lookahead_count; while (lookahead_count) { hlist_add_head(&objs[--lookahead_count]->node, @@ -396,15 +400,15 @@ free_to_obj_pool: for (i = 0; i < ODEBUG_BATCH_SIZE; i++) { obj = __alloc_object(&obj_pool); hlist_add_head(&obj->node, &obj_to_free); - obj_pool_free--; - obj_nr_tofree++; + WRITE_ONCE(obj_pool_free, obj_pool_free - 1); + WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1); } } } else { - obj_pool_free++; + WRITE_ONCE(obj_pool_free, obj_pool_free + 1); hlist_add_head(&obj->node, &obj_pool); if (lookahead_count) { - obj_pool_free += lookahead_count; + WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count); obj_pool_used -= lookahead_count; while (lookahead_count) { hlist_add_head(&objs[--lookahead_count]->node, @@ -423,7 +427,7 @@ free_to_obj_pool: static void free_object(struct debug_obj *obj) { __free_object(obj); - if (!obj_freeing && obj_nr_tofree) { + if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) { WRITE_ONCE(obj_freeing, true); schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY); } @@ -982,7 +986,7 @@ repeat: debug_objects_maxchecked = objs_checked; /* Schedule work to actually kmem_cache_free() objects */ - if (!obj_freeing && obj_nr_tofree) { + if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) { WRITE_ONCE(obj_freeing, true); schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY); } @@ -1008,12 +1012,12 @@ static int debug_stats_show(struct seq_file *m, void *v) seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked); seq_printf(m, "warnings :%d\n", debug_objects_warnings); seq_printf(m, "fixups :%d\n", debug_objects_fixups); - seq_printf(m, "pool_free :%d\n", obj_pool_free + obj_percpu_free); + seq_printf(m, "pool_free :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free); seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free); seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free); seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); - seq_printf(m, "on_free_list :%d\n", obj_nr_tofree); + seq_printf(m, "on_free_list :%d\n", READ_ONCE(obj_nr_tofree)); seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated); seq_printf(m, "objs_freed :%d\n", debug_objects_freed); return 0; diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c index 9c485df1308f..f02e10fa6238 100644 --- a/lib/raid6/mktables.c +++ b/lib/raid6/mktables.c @@ -56,8 +56,8 @@ int main(int argc, char *argv[]) uint8_t v; uint8_t exptbl[256], invtbl[256]; - printf("#include <linux/raid/pq.h>\n"); printf("#include <linux/export.h>\n"); + printf("#include <linux/raid/pq.h>\n"); /* Compute multiplication table */ printf("\nconst u8 __attribute__((aligned(256)))\n" diff --git a/lib/raid6/unroll.awk b/lib/raid6/unroll.awk index c6aa03631df8..0809805a7e23 100644 --- a/lib/raid6/unroll.awk +++ b/lib/raid6/unroll.awk @@ -13,7 +13,7 @@ BEGIN { for (i = 0; i < rep; ++i) { tmp = $0 gsub(/\$\$/, i, tmp) - gsub(/\$\#/, n, tmp) + gsub(/\$#/, n, tmp) gsub(/\$\*/, "$", tmp) print tmp } diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 969e5400a615..ee3ce1494568 100644 
--- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -667,8 +667,8 @@ void sbitmap_add_wait_queue(struct sbitmap_queue *sbq, if (!sbq_wait->sbq) { sbq_wait->sbq = sbq; atomic_inc(&sbq->ws_active); + add_wait_queue(&ws->wait, &sbq_wait->wait); } - add_wait_queue(&ws->wait, &sbq_wait->wait); } EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index c2cf2c311b7d..5813072bc589 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -311,7 +311,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, if (prv) table->nents = ++table->orig_nents; - return -ENOMEM; + return -ENOMEM; } sg_init_table(sg, alloc_size); diff --git a/lib/stackdepot.c b/lib/stackdepot.c index ed717dd08ff3..81c69c08d1d1 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -83,15 +83,19 @@ static bool init_stack_slab(void **prealloc) return true; if (stack_slabs[depot_index] == NULL) { stack_slabs[depot_index] = *prealloc; + *prealloc = NULL; } else { - stack_slabs[depot_index + 1] = *prealloc; + /* If this is the last depot slab, do not touch the next one. */ + if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) { + stack_slabs[depot_index + 1] = *prealloc; + *prealloc = NULL; + } /* * This smp_store_release pairs with smp_load_acquire() from * |next_slab_inited| above and in stack_depot_save(). */ smp_store_release(&next_slab_inited, 1); } - *prealloc = NULL; return true; } diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index dccb95af6003..706020b06617 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c @@ -30,13 +30,6 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; unsigned long res = 0; - /* - * Truncate 'max' to the user-specified limit, so that - * we only have one limit we need to check in the loop - */ - if (max > count) - max = count; - if (IS_UNALIGNED(src, dst)) goto byte_at_a_time; @@ -114,6 +107,13 @@ long strncpy_from_user(char *dst, const char __user *src, long count) unsigned long max = max_addr - src_addr; long retval; + /* + * Truncate 'max' to the user-specified limit, so that + * we only have one limit we need to check in the loop + */ + if (max > count) + max = count; + kasan_check_write(dst, count); check_object_size(dst, count, false); if (user_access_begin(src, max)) { diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index 6c0005d5dd5c..41670d4a5816 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -27,13 +27,6 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long c; /* - * Truncate 'max' to the user-specified limit, so that - * we only have one limit we need to check in the loop - */ - if (max > count) - max = count; - - /* * Do everything aligned. But that means that we * need to also expand the maximum.. 
*/ @@ -109,6 +102,13 @@ long strnlen_user(const char __user *str, long count) unsigned long max = max_addr - src_addr; long retval; + /* + * Truncate 'max' to the user-specified limit, so that + * we only have one limit we need to check in the loop + */ + if (max > count) + max = count; + if (user_access_begin(str, max)) { retval = do_strnlen_user(str, count, max); user_access_end(); diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 49cc4d570a40..bd3d9ef7d39e 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -157,6 +157,7 @@ static noinline void __init kmalloc_oob_krealloc_more(void) if (!ptr1 || !ptr2) { pr_err("Allocation failed\n"); kfree(ptr1); + kfree(ptr2); return; } diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 7df4f7f395bf..55c14e8c8859 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -2,6 +2,7 @@ /* * test_xarray.c: Test the XArray API * Copyright (c) 2017-2018 Microsoft Corporation + * Copyright (c) 2019-2020 Oracle * Author: Matthew Wilcox <willy@infradead.org> */ @@ -902,28 +903,34 @@ static noinline void check_store_iter(struct xarray *xa) XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_multi_find(struct xarray *xa) +static noinline void check_multi_find_1(struct xarray *xa, unsigned order) { #ifdef CONFIG_XARRAY_MULTI + unsigned long multi = 3 << order; + unsigned long next = 4 << order; unsigned long index; - xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL); - XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL); + xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL); + XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL); + XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL); index = 0; XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) != - xa_mk_value(12)); - XA_BUG_ON(xa, index != 12); - index = 13; + xa_mk_value(multi)); + XA_BUG_ON(xa, index != multi); + index = multi + 1; XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) != - xa_mk_value(12)); - XA_BUG_ON(xa, (index < 12) || (index >= 16)); + xa_mk_value(multi)); + XA_BUG_ON(xa, (index < multi) || (index >= next)); XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) != - xa_mk_value(16)); - XA_BUG_ON(xa, index != 16); - - xa_erase_index(xa, 12); - xa_erase_index(xa, 16); + xa_mk_value(next)); + XA_BUG_ON(xa, index != next); + XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL); + XA_BUG_ON(xa, index != next); + + xa_erase_index(xa, multi); + xa_erase_index(xa, next); + xa_erase_index(xa, next + 1); XA_BUG_ON(xa, !xa_empty(xa)); #endif } @@ -1046,12 +1053,33 @@ static noinline void check_find_3(struct xarray *xa) xa_destroy(xa); } +static noinline void check_find_4(struct xarray *xa) +{ + unsigned long index = 0; + void *entry; + + xa_store_index(xa, ULONG_MAX, GFP_KERNEL); + + entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT); + XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX)); + + entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT); + XA_BUG_ON(xa, entry); + + xa_erase_index(xa, ULONG_MAX); +} + static noinline void check_find(struct xarray *xa) { + unsigned i; + check_find_1(xa); check_find_2(xa); check_find_3(xa); - check_multi_find(xa); + check_find_4(xa); + + for (i = 2; i < 10; i++) + check_multi_find_1(xa, i); check_multi_find_2(xa); } @@ -1132,6 +1160,27 @@ static noinline void check_move_tiny(struct xarray *xa) XA_BUG_ON(xa, !xa_empty(xa)); } +static noinline void check_move_max(struct xarray *xa) +{ + XA_STATE(xas, xa, 0); + + xa_store_index(xa, ULONG_MAX, GFP_KERNEL); + 
rcu_read_lock(); + XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX)); + XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL); + rcu_read_unlock(); + + xas_set(&xas, 0); + rcu_read_lock(); + XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX)); + xas_pause(&xas); + XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL); + rcu_read_unlock(); + + xa_erase_index(xa, ULONG_MAX); + XA_BUG_ON(xa, !xa_empty(xa)); +} + static noinline void check_move_small(struct xarray *xa, unsigned long idx) { XA_STATE(xas, xa, 0); @@ -1240,6 +1289,7 @@ static noinline void check_move(struct xarray *xa) xa_destroy(xa); check_move_tiny(xa); + check_move_max(xa); for (i = 0; i < 16; i++) check_move_small(xa, 1UL << i); diff --git a/lib/ubsan.c b/lib/ubsan.c index e7d31735950d..f007a406f89c 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -140,25 +140,21 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type, } } -static DEFINE_SPINLOCK(report_lock); - -static void ubsan_prologue(struct source_location *location, - unsigned long *flags) +static void ubsan_prologue(struct source_location *location) { current->in_ubsan++; - spin_lock_irqsave(&report_lock, *flags); pr_err("========================================" "========================================\n"); print_source_location("UBSAN: Undefined behaviour in", location); } -static void ubsan_epilogue(unsigned long *flags) +static void ubsan_epilogue(void) { dump_stack(); pr_err("========================================" "========================================\n"); - spin_unlock_irqrestore(&report_lock, *flags); + current->in_ubsan--; } @@ -167,14 +163,13 @@ static void handle_overflow(struct overflow_data *data, void *lhs, { struct type_descriptor *type = data->type; - unsigned long flags; char lhs_val_str[VALUE_LENGTH]; char rhs_val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs); val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs); @@ -186,7 +181,7 @@ static void handle_overflow(struct overflow_data *data, void *lhs, rhs_val_str, type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } void __ubsan_handle_add_overflow(struct overflow_data *data, @@ -214,20 +209,19 @@ EXPORT_SYMBOL(__ubsan_handle_mul_overflow); void __ubsan_handle_negate_overflow(struct overflow_data *data, void *old_val) { - unsigned long flags; char old_val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val); pr_err("negation of %s cannot be represented in type %s:\n", old_val_str, data->type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_negate_overflow); @@ -235,13 +229,12 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow); void __ubsan_handle_divrem_overflow(struct overflow_data *data, void *lhs, void *rhs) { - unsigned long flags; char rhs_val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs); @@ -251,58 +244,52 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data, else pr_err("division by zero\n"); - ubsan_epilogue(&flags); + ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); static void 
handle_null_ptr_deref(struct type_mismatch_data_common *data) { - unsigned long flags; - if (suppress_report(data->location)) return; - ubsan_prologue(data->location, &flags); + ubsan_prologue(data->location); pr_err("%s null pointer of type %s\n", type_check_kinds[data->type_check_kind], data->type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } static void handle_misaligned_access(struct type_mismatch_data_common *data, unsigned long ptr) { - unsigned long flags; - if (suppress_report(data->location)) return; - ubsan_prologue(data->location, &flags); + ubsan_prologue(data->location); pr_err("%s misaligned address %p for type %s\n", type_check_kinds[data->type_check_kind], (void *)ptr, data->type->type_name); pr_err("which requires %ld byte alignment\n", data->alignment); - ubsan_epilogue(&flags); + ubsan_epilogue(); } static void handle_object_size_mismatch(struct type_mismatch_data_common *data, unsigned long ptr) { - unsigned long flags; - if (suppress_report(data->location)) return; - ubsan_prologue(data->location, &flags); + ubsan_prologue(data->location); pr_err("%s address %p with insufficient space\n", type_check_kinds[data->type_check_kind], (void *) ptr); pr_err("for an object of type %s\n", data->type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data, @@ -351,34 +338,33 @@ EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1); void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index) { - unsigned long flags; char index_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(index_str, sizeof(index_str), data->index_type, index); pr_err("index %s is out of range for type %s\n", index_str, data->array_type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_out_of_bounds); void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, void *lhs, void *rhs) { - unsigned long flags; struct type_descriptor *rhs_type = data->rhs_type; struct type_descriptor *lhs_type = data->lhs_type; char rhs_str[VALUE_LENGTH]; char lhs_str[VALUE_LENGTH]; + unsigned long ua_flags = user_access_save(); if (suppress_report(&data->location)) - return; + goto out; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs); val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs); @@ -401,18 +387,18 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, lhs_str, rhs_str, lhs_type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); +out: + user_access_restore(ua_flags); } EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); void __ubsan_handle_builtin_unreachable(struct unreachable_data *data) { - unsigned long flags; - - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); pr_err("calling __builtin_unreachable()\n"); - ubsan_epilogue(&flags); + ubsan_epilogue(); panic("can't return from __builtin_unreachable()"); } EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); @@ -420,19 +406,18 @@ EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); void __ubsan_handle_load_invalid_value(struct invalid_value_data *data, void *val) { - unsigned long flags; char val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); 
val_to_string(val_str, sizeof(val_str), data->type, val); pr_err("load of value %s is not a valid value for type %s\n", val_str, data->type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_load_invalid_value); diff --git a/lib/xarray.c b/lib/xarray.c index 1237c213f52b..1d9fab7db8da 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0+ /* * XArray implementation - * Copyright (c) 2017 Microsoft Corporation + * Copyright (c) 2017-2018 Microsoft Corporation + * Copyright (c) 2018-2020 Oracle * Author: Matthew Wilcox <willy@infradead.org> */ @@ -967,6 +968,7 @@ void xas_pause(struct xa_state *xas) if (xas_invalid(xas)) return; + xas->xa_node = XAS_RESTART; if (node) { unsigned int offset = xas->xa_offset; while (++offset < XA_CHUNK_SIZE) { @@ -974,10 +976,11 @@ void xas_pause(struct xa_state *xas) break; } xas->xa_index += (offset - xas->xa_offset) << node->shift; + if (xas->xa_index == 0) + xas->xa_node = XAS_BOUNDS; } else { xas->xa_index++; } - xas->xa_node = XAS_RESTART; } EXPORT_SYMBOL_GPL(xas_pause); @@ -1079,13 +1082,15 @@ void *xas_find(struct xa_state *xas, unsigned long max) { void *entry; - if (xas_error(xas)) + if (xas_error(xas) || xas->xa_node == XAS_BOUNDS) return NULL; + if (xas->xa_index > max) + return set_bounds(xas); if (!xas->xa_node) { xas->xa_index = 1; return set_bounds(xas); - } else if (xas_top(xas->xa_node)) { + } else if (xas->xa_node == XAS_RESTART) { entry = xas_load(xas); if (entry || xas_not_node(xas->xa_node)) return entry; @@ -1150,6 +1155,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) if (xas_error(xas)) return NULL; + if (xas->xa_index > max) + goto max; if (!xas->xa_node) { xas->xa_index = 1; @@ -1824,6 +1831,17 @@ void *xa_find(struct xarray *xa, unsigned long *indexp, } EXPORT_SYMBOL(xa_find); +static bool xas_sibling(struct xa_state *xas) +{ + struct xa_node *node = xas->xa_node; + unsigned long mask; + + if (!node) + return false; + mask = (XA_CHUNK_SIZE << node->shift) - 1; + return (xas->xa_index & mask) > (xas->xa_offset << node->shift); +} + /** * xa_find_after() - Search the XArray for a present entry. * @xa: XArray. @@ -1847,21 +1865,20 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp, XA_STATE(xas, xa, *indexp + 1); void *entry; + if (xas.xa_index == 0) + return NULL; + rcu_read_lock(); for (;;) { if ((__force unsigned int)filter < XA_MAX_MARKS) entry = xas_find_marked(&xas, max, filter); else entry = xas_find(&xas, max); - if (xas.xa_node == XAS_BOUNDS) + + if (xas_invalid(&xas)) break; - if (xas.xa_shift) { - if (xas.xa_index & ((1UL << xas.xa_shift) - 1)) - continue; - } else { - if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK)) - continue; - } + if (xas_sibling(&xas)) + continue; if (!xas_retry(&xas, entry)) break; } |