summaryrefslogtreecommitdiff
path: root/lib/raid
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2026-03-27 07:16:39 +0100
committerAndrew Morton <akpm@linux-foundation.org>2026-04-02 23:36:16 -0700
commit0471415f3fd6007bf435dbf158060bc646d7813f (patch)
tree79b5ac23d0f510dcafe4338e3bd78d3d40c318e9 /lib/raid
parent7c6e6b2b48e8e9f3a1ad57dc78a8d33947cb5dda (diff)
xor: cleanup registration and probing
Originally, the XOR code benchmarked all algorithms at load time, but it has since then been hacked multiple times to allow forcing an algorithm, and then commit 524ccdbdfb52 ("crypto: xor - defer load time benchmark to a later time") changed the logic to a two-step process of registration and benchmarking, but only when built-in. Rework this, so that the XOR_TRY_TEMPLATES macro magic now always just deals with adding the templates to the list, and benchmarking is always done in a second pass; for modular builds from module_init, and for the built-in case using a separate init call level. Link: https://lkml.kernel.org/r/20260327061704.3707577-8-hch@lst.de Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Eric Biggers <ebiggers@kernel.org> Tested-by: Eric Biggers <ebiggers@kernel.org> Cc: Albert Ou <aou@eecs.berkeley.edu> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Alexandre Ghiti <alex@ghiti.fr> Cc: Andreas Larsson <andreas@gaisler.com> Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com> Cc: Ard Biesheuvel <ardb@kernel.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: "Borislav Petkov (AMD)" <bp@alien8.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chris Mason <clm@fb.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Dan Williams <dan.j.williams@intel.com> Cc: David S. Miller <davem@davemloft.net> Cc: David Sterba <dsterba@suse.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Huacai Chen <chenhuacai@kernel.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jason A. 
Donenfeld <jason@zx2c4.com> Cc: Johannes Berg <johannes@sipsolutions.net> Cc: Li Nan <linan122@huawei.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Magnus Lindholm <linmag7@gmail.com> Cc: Matt Turner <mattst88@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Palmer Dabbelt <palmer@dabbelt.com> Cc: Richard Henderson <richard.henderson@linaro.org> Cc: Richard Weinberger <richard@nod.at> Cc: Russell King <linux@armlinux.org.uk> Cc: Song Liu <song@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Ted Ts'o <tytso@mit.edu> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: WANG Xuerui <kernel@xen0n.name> Cc: Will Deacon <will@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'lib/raid')
-rw-r--r--lib/raid/xor/xor-core.c98
1 files changed, 48 insertions, 50 deletions
diff --git a/lib/raid/xor/xor-core.c b/lib/raid/xor/xor-core.c
index c54f48405c40..e6e593e404fb 100644
--- a/lib/raid/xor/xor-core.c
+++ b/lib/raid/xor/xor-core.c
@@ -52,29 +52,14 @@ EXPORT_SYMBOL(xor_blocks);
/* Set of all registered templates. */
static struct xor_block_template *__initdata template_list;
+static bool __initdata xor_forced = false;
-#ifndef MODULE
static void __init do_xor_register(struct xor_block_template *tmpl)
{
tmpl->next = template_list;
template_list = tmpl;
}
-static int __init register_xor_blocks(void)
-{
- active_template = XOR_SELECT_TEMPLATE(NULL);
-
- if (!active_template) {
-#define xor_speed do_xor_register
- // register all the templates and pick the first as the default
- XOR_TRY_TEMPLATES;
-#undef xor_speed
- active_template = template_list;
- }
- return 0;
-}
-#endif
-
#define BENCH_SIZE 4096
#define REPS 800U
@@ -85,9 +70,6 @@ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
unsigned long reps;
ktime_t min, start, t0;
- tmpl->next = template_list;
- template_list = tmpl;
-
preempt_disable();
reps = 0;
@@ -111,63 +93,79 @@ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
pr_info(" %-16s: %5d MB/sec\n", tmpl->name, speed);
}
-static int __init
-calibrate_xor_blocks(void)
+static int __init calibrate_xor_blocks(void)
{
void *b1, *b2;
struct xor_block_template *f, *fastest;
- fastest = XOR_SELECT_TEMPLATE(NULL);
-
- if (fastest) {
- printk(KERN_INFO "xor: automatically using best "
- "checksumming function %-10s\n",
- fastest->name);
- goto out;
- }
+ if (xor_forced)
+ return 0;
b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
if (!b1) {
- printk(KERN_WARNING "xor: Yikes! No memory available.\n");
+ pr_warn("xor: Yikes! No memory available.\n");
return -ENOMEM;
}
b2 = b1 + 2*PAGE_SIZE + BENCH_SIZE;
- /*
- * If this arch/cpu has a short-circuited selection, don't loop through
- * all the possible functions, just test the best one
- */
-
-#define xor_speed(templ) do_xor_speed((templ), b1, b2)
-
- printk(KERN_INFO "xor: measuring software checksum speed\n");
- template_list = NULL;
- XOR_TRY_TEMPLATES;
+ pr_info("xor: measuring software checksum speed\n");
fastest = template_list;
- for (f = fastest; f; f = f->next)
+ for (f = template_list; f; f = f->next) {
+ do_xor_speed(f, b1, b2);
if (f->speed > fastest->speed)
fastest = f;
-
+ }
+ active_template = fastest;
pr_info("xor: using function: %s (%d MB/sec)\n",
fastest->name, fastest->speed);
+ free_pages((unsigned long)b1, 2);
+ return 0;
+}
+
+static int __init xor_init(void)
+{
+ /*
+ * If this arch/cpu has a short-circuited selection, don't loop through
+ * all the possible functions, just use the best one.
+ */
+ active_template = XOR_SELECT_TEMPLATE(NULL);
+ if (active_template) {
+ pr_info("xor: automatically using best checksumming function %-10s\n",
+ active_template->name);
+ xor_forced = true;
+ return 0;
+ }
+
+#define xor_speed do_xor_register
+ XOR_TRY_TEMPLATES;
#undef xor_speed
- free_pages((unsigned long)b1, 2);
-out:
- active_template = fastest;
+#ifdef MODULE
+ return calibrate_xor_blocks();
+#else
+ /*
+ * Pick the first template as the temporary default until calibration
+ * happens.
+ */
+ active_template = template_list;
return 0;
+#endif
}
-static __exit void xor_exit(void) { }
+static __exit void xor_exit(void)
+{
+}
MODULE_DESCRIPTION("RAID-5 checksumming functions");
MODULE_LICENSE("GPL");
+/*
+ * When built-in we must register the default template before md, but we don't
+ * want calibration to run that early as that would delay the boot process.
+ */
#ifndef MODULE
-/* when built-in xor.o must initialize before drivers/md/md.o */
-core_initcall(register_xor_blocks);
+__initcall(calibrate_xor_blocks);
#endif
-
-module_init(calibrate_xor_blocks);
+core_initcall(xor_init);
module_exit(xor_exit);