From e5a2c899957659cd1a9f789bc462f9c0b35f5150 Mon Sep 17 00:00:00 2001
From: Hannes Frederic Sowa
Date: Wed, 5 Nov 2014 00:23:04 +0100
Subject: fast_hash: avoid indirect function calls

By default the arch_fast_hash hashing function pointers are initialized
to jhash(2). If during boot-up a CPU with SSE4.2 is detected they get
updated to the CRC32 ones. This dispatching scheme incurs a function
pointer lookup and indirect call for every hashing operation.

rhashtable as a user of arch_fast_hash e.g. stores pointers to hashing
functions in its structure, too, causing two indirect branches per
hashing operation.

Using alternative_call we can get away with one of those indirect
branches.

Acked-by: Daniel Borkmann
Cc: Thomas Graf
Signed-off-by: Hannes Frederic Sowa
Signed-off-by: David S. Miller
---
 arch/x86/include/asm/hash.h | 51 ++++++++++++++++++++++++++++++++++++++++-----
 arch/x86/lib/hash.c         | 29 +++++++++++++++-----------
 2 files changed, 63 insertions(+), 17 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/include/asm/hash.h b/arch/x86/include/asm/hash.h
index e8c58f88b1d4..a881d784f044 100644
--- a/arch/x86/include/asm/hash.h
+++ b/arch/x86/include/asm/hash.h
@@ -1,7 +1,48 @@
-#ifndef _ASM_X86_HASH_H
-#define _ASM_X86_HASH_H
+#ifndef __ASM_X86_HASH_H
+#define __ASM_X86_HASH_H
 
-struct fast_hash_ops;
-extern void setup_arch_fast_hash(struct fast_hash_ops *ops);
+#include
+#include
 
-#endif /* _ASM_X86_HASH_H */
+u32 __intel_crc4_2_hash(const void *data, u32 len, u32 seed);
+u32 __intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed);
+
+/*
+ * non-inline versions of jhash so gcc does not need to generate
+ * duplicate code in every object file
+ */
+u32 __jhash(const void *data, u32 len, u32 seed);
+u32 __jhash2(const u32 *data, u32 len, u32 seed);
+
+/*
+ * for documentation of these functions please look into
+ *
+ */
+
+static inline u32 arch_fast_hash(const void *data, u32 len, u32 seed)
+{
+	u32 hash;
+
+	alternative_call(__jhash, __intel_crc4_2_hash, X86_FEATURE_XMM4_2,
+#ifdef CONFIG_X86_64
+			 "=a" (hash), "D" (data), "S" (len), "d" (seed));
+#else
+			 "=a" (hash), "a" (data), "d" (len), "c" (seed));
+#endif
+	return hash;
+}
+
+static inline u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
+{
+	u32 hash;
+
+	alternative_call(__jhash2, __intel_crc4_2_hash2, X86_FEATURE_XMM4_2,
+#ifdef CONFIG_X86_64
+			 "=a" (hash), "D" (data), "S" (len), "d" (seed));
+#else
+			 "=a" (hash), "a" (data), "d" (len), "c" (seed));
+#endif
+	return hash;
+}
+
+#endif /* __ASM_X86_HASH_H */
diff --git a/arch/x86/lib/hash.c b/arch/x86/lib/hash.c
index ff4fa51a5b1f..e14327198835 100644
--- a/arch/x86/lib/hash.c
+++ b/arch/x86/lib/hash.c
@@ -31,13 +31,13 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include
-#include
-
 #include
 #include
 #include
 
+#include
+#include
+
 static inline u32 crc32_u32(u32 crc, u32 val)
 {
 #ifdef CONFIG_AS_CRC32
@@ -48,7 +48,7 @@ static inline u32 crc32_u32(u32 crc, u32 val)
 	return crc;
 }
 
-static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed)
+u32 __intel_crc4_2_hash(const void *data, u32 len, u32 seed)
 {
 	const u32 *p32 = (const u32 *) data;
 	u32 i, tmp = 0;
@@ -71,22 +71,27 @@ static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed)
 
 	return seed;
 }
+EXPORT_SYMBOL(__intel_crc4_2_hash);
 
-static u32 intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed)
+u32 __intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed)
 {
-	const u32 *p32 = (const u32 *) data;
 	u32 i;
 
 	for (i = 0; i < len; i++)
-		seed = crc32_u32(seed, *p32++);
+		seed = crc32_u32(seed, *data++);
 
 	return seed;
 }
+EXPORT_SYMBOL(__intel_crc4_2_hash2);
 
-void __init setup_arch_fast_hash(struct fast_hash_ops *ops)
+u32 __jhash(const void *data, u32 len, u32 seed)
 {
-	if (cpu_has_xmm4_2) {
-		ops->hash = intel_crc4_2_hash;
-		ops->hash2 = intel_crc4_2_hash2;
-	}
+	return jhash(data, len, seed);
+}
+EXPORT_SYMBOL(__jhash);
+
+u32 __jhash2(const u32 *data, u32 len, u32 seed)
+{
+	return jhash2(data, len, seed);
 }
+EXPORT_SYMBOL(__jhash2);
-- cgit v1.2.3
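
For context, the dispatch scheme this patch replaces can be sketched roughly as
follows. This is a simplified illustration only, not code from the tree; names
such as old_arch_fast_hash, hash_ops and hashed_table are invented here:

	#include <linux/types.h>

	/* pre-patch scheme: ops structure filled in at boot by setup_arch_fast_hash() */
	struct fast_hash_ops {
		u32 (*hash)(const void *data, u32 len, u32 seed);
		u32 (*hash2)(const u32 *data, u32 len, u32 seed);
	};

	static struct fast_hash_ops hash_ops;	/* jhash by default, CRC32C if SSE4.2 */

	static inline u32 old_arch_fast_hash(const void *data, u32 len, u32 seed)
	{
		return hash_ops.hash(data, len, seed);	/* indirect call #2 */
	}

	/* rhashtable-style user that stores yet another function pointer */
	struct hashed_table {
		u32 (*hashfn)(const void *data, u32 len, u32 seed);	/* = old_arch_fast_hash */
	};

	static u32 table_hash(const struct hashed_table *t, const void *key, u32 len)
	{
		return t->hashfn(key, len, 0);		/* indirect call #1 */
	}

With the alternative_call() version above, the inner dispatch becomes a direct
call that is patched over to __intel_crc4_2_hash when X86_FEATURE_XMM4_2 is
present, so only the user's own stored pointer remains as an indirect branch.
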

From a77f9c5dcdf8480a93332792c336fa2bf9d31229 Mon Sep 17 00:00:00 2001
From: Jay Vosburgh
Date: Fri, 14 Nov 2014 11:05:06 -0800
Subject: Revert "fast_hash: avoid indirect function calls"

This reverts commit e5a2c899957659cd1a9f789bc462f9c0b35f5150.

Commit e5a2c899 introduced an alternative_call, arch_fast_hash2, that
selects between __jhash2 and __intel_crc4_2_hash based on the
X86_FEATURE_XMM4_2.

Unfortunately, the alternative_call system does not appear to be
suitable for use with C functions, as register usage is not handled
properly for the called functions.

The __jhash2 function in particular clobbers registers that are not
preserved when called via alternative_call, resulting in a panic for
direct callers of arch_fast_hash2 on older CPUs lacking sse4_2. It is
possible that __intel_crc4_2_hash works merely by chance because it
uses fewer registers.

This commit was suggested as the source of the problem by Jesse Gross.

Signed-off-by: Jay Vosburgh
Signed-off-by: David S. Miller
---
 arch/x86/include/asm/hash.h | 51 +++++----------------------------------
 arch/x86/lib/hash.c         | 29 +++++++++++--------------
 2 files changed, 17 insertions(+), 63 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/include/asm/hash.h b/arch/x86/include/asm/hash.h
index a881d784f044..e8c58f88b1d4 100644
--- a/arch/x86/include/asm/hash.h
+++ b/arch/x86/include/asm/hash.h
@@ -1,48 +1,7 @@
-#ifndef __ASM_X86_HASH_H
-#define __ASM_X86_HASH_H
+#ifndef _ASM_X86_HASH_H
+#define _ASM_X86_HASH_H
 
-#include
-#include
+struct fast_hash_ops;
+extern void setup_arch_fast_hash(struct fast_hash_ops *ops);
 
-u32 __intel_crc4_2_hash(const void *data, u32 len, u32 seed);
-u32 __intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed);
-
-/*
- * non-inline versions of jhash so gcc does not need to generate
- * duplicate code in every object file
- */
-u32 __jhash(const void *data, u32 len, u32 seed);
-u32 __jhash2(const u32 *data, u32 len, u32 seed);
-
-/*
- * for documentation of these functions please look into
- *
- */
-
-static inline u32 arch_fast_hash(const void *data, u32 len, u32 seed)
-{
-	u32 hash;
-
-	alternative_call(__jhash, __intel_crc4_2_hash, X86_FEATURE_XMM4_2,
-#ifdef CONFIG_X86_64
-			 "=a" (hash), "D" (data), "S" (len), "d" (seed));
-#else
-			 "=a" (hash), "a" (data), "d" (len), "c" (seed));
-#endif
-	return hash;
-}
-
-static inline u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
-{
-	u32 hash;
-
-	alternative_call(__jhash2, __intel_crc4_2_hash2, X86_FEATURE_XMM4_2,
-#ifdef CONFIG_X86_64
-			 "=a" (hash), "D" (data), "S" (len), "d" (seed));
-#else
-			 "=a" (hash), "a" (data), "d" (len), "c" (seed));
-#endif
-	return hash;
-}
-
-#endif /* __ASM_X86_HASH_H */
+#endif /* _ASM_X86_HASH_H */
diff --git a/arch/x86/lib/hash.c b/arch/x86/lib/hash.c
index e14327198835..ff4fa51a5b1f 100644
--- a/arch/x86/lib/hash.c
+++ b/arch/x86/lib/hash.c
@@ -31,13 +31,13 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include
+#include
+
 #include
 #include
 #include
 
-#include
-#include
-
 static inline u32 crc32_u32(u32 crc, u32 val)
 {
 #ifdef CONFIG_AS_CRC32
@@ -48,7 +48,7 @@ static inline u32 crc32_u32(u32 crc, u32 val)
 	return crc;
 }
 
-u32 __intel_crc4_2_hash(const void *data, u32 len, u32 seed)
+static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed)
 {
 	const u32 *p32 = (const u32 *) data;
 	u32 i, tmp = 0;
@@ -71,27 +71,22 @@ u32 __intel_crc4_2_hash(const void *data, u32 len, u32 seed)
 
 	return seed;
 }
-EXPORT_SYMBOL(__intel_crc4_2_hash);
 
-u32 __intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed)
+static u32 intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed)
 {
+	const u32 *p32 = (const u32 *) data;
 	u32 i;
 
 	for (i = 0; i < len; i++)
-		seed = crc32_u32(seed, *data++);
+		seed = crc32_u32(seed, *p32++);
 
 	return seed;
 }
-EXPORT_SYMBOL(__intel_crc4_2_hash2);
 
-u32 __jhash(const void *data, u32 len, u32 seed)
+void __init setup_arch_fast_hash(struct fast_hash_ops *ops)
 {
-	return jhash(data, len, seed);
-}
-EXPORT_SYMBOL(__jhash);
-
-u32 __jhash2(const u32 *data, u32 len, u32 seed)
-{
-	return jhash2(data, len, seed);
+	if (cpu_has_xmm4_2) {
+		ops->hash = intel_crc4_2_hash;
+		ops->hash2 = intel_crc4_2_hash2;
+	}
 }
-EXPORT_SYMBOL(__jhash2);
-- cgit v1.2.3
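
The failure mode can be illustrated with a loose sketch of what the patched-in
call boiled down to. This is simplified for illustration and is not the actual
alternative_call macro or any kernel code:

	static inline u32 fragile_hash2(const u32 *data, u32 len, u32 seed)
	{
		u32 hash;

		/*
		 * Only rax/rdi/rsi/rdx are declared to the compiler here.  A C
		 * implementation such as __jhash2() may clobber any other
		 * caller-saved register (rcx, r8-r11, ...) per the calling
		 * convention, so live values the compiler keeps in those
		 * registers across this statement are silently corrupted.
		 */
		asm volatile("call __jhash2"
			     : "=a" (hash)
			     : "D" (data), "S" (len), "d" (seed));
		return hash;
	}

The assembly helpers that alternative_call() is normally paired with are
written to preserve everything except the registers listed, which is why the
mechanism works for them but not for compiler-generated C functions.
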

From 769e0de6475e5512f88bfb4dbf6d6323fd23514f Mon Sep 17 00:00:00 2001
From: Alexei Starovoitov
Date: Sat, 29 Nov 2014 14:46:13 -0800
Subject: bpf: x86: fix epilogue generation for eBPF programs

classic BPF has a restriction that last insn is always BPF_RET.
eBPF doesn't have BPF_RET instruction and this restriction.
It has BPF_EXIT insn which can appear anywhere in the program
one or more times and it doesn't have to be last insn.
Fix eBPF JIT to emit epilogue when first BPF_EXIT is seen and all
other BPF_EXIT instructions will be emitted as jump.

Since jump offset to epilogue is computed as:
jmp_offset = ctx->cleanup_addr - addrs[i]

we need to change type of cleanup_addr to signed to compute the offset as:
(long long) ((int)20 - (int)30)
instead of:
(long long) ((unsigned int)20 - (int)30)

Fixes: 622582786c9e ("net: filter: x86: internal BPF JIT")
Signed-off-by: Alexei Starovoitov
Signed-off-by: David S. Miller
---
 arch/x86/net/bpf_jit_comp.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 3f627345d51c..7e90244c84e3 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -178,7 +178,7 @@ static void jit_fill_hole(void *area, unsigned int size)
 }
 
 struct jit_context {
-	unsigned int cleanup_addr; /* epilogue code offset */
+	int cleanup_addr; /* epilogue code offset */
 	bool seen_ld_abs;
 };
 
@@ -192,6 +192,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 	struct bpf_insn *insn = bpf_prog->insnsi;
 	int insn_cnt = bpf_prog->len;
 	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
+	bool seen_exit = false;
 	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
 	int i;
 	int proglen = 0;
@@ -854,10 +855,11 @@ common_load:
 			goto common_load;
 
 		case BPF_JMP | BPF_EXIT:
-			if (i != insn_cnt - 1) {
+			if (seen_exit) {
 				jmp_offset = ctx->cleanup_addr - addrs[i];
 				goto emit_jmp;
 			}
+			seen_exit = true;
 			/* update cleanup_addr */
 			ctx->cleanup_addr = proglen;
 			/* mov rbx, qword ptr [rbp-X] */
-- cgit v1.2.3
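
The integer-promotion issue described in the message can be reproduced with a
standalone userspace snippet; the values 20 and 30 are taken from the example
above, and the variable names are made up for illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int cleanup_addr_old = 20;	/* old field type */
		int cleanup_addr_new = 20;		/* new field type */
		int addr = 30;				/* addrs[i] of the BPF_EXIT insn */

		/* unsigned int - int is evaluated as unsigned: prints 4294967286 */
		printf("%lld\n", (long long) (cleanup_addr_old - addr));

		/* int - int keeps the sign: prints -10, a valid backward jump offset */
		printf("%lld\n", (long long) (cleanup_addr_new - addr));

		return 0;
	}
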

From d148134be51fe05271ec8d47fe8c815bdee2b8e7 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Thu, 4 Dec 2014 15:00:48 -0800
Subject: x86: bpf_jit_comp: Reduce is_ereg() code size

Use the (1 << reg) & mask trick to reduce code size.

x86-64 size difference -O2 without profiling for various gcc versions:

$ size arch/x86/net/bpf_jit_comp.o*
   text    data     bss     dec     hex filename
   9266       4       0    9270    2436 arch/x86/net/bpf_jit_comp.o.4.4.new
  10042       4       0   10046    273e arch/x86/net/bpf_jit_comp.o.4.4.old
   9109       4       0    9113    2399 arch/x86/net/bpf_jit_comp.o.4.6.new
   9717       4       0    9721    25f9 arch/x86/net/bpf_jit_comp.o.4.6.old
   8789       4       0    8793    2259 arch/x86/net/bpf_jit_comp.o.4.7.new
  10245       4       0   10249    2809 arch/x86/net/bpf_jit_comp.o.4.7.old
   9671       4       0    9675    25cb arch/x86/net/bpf_jit_comp.o.4.9.new
  10679       4       0   10683    29bb arch/x86/net/bpf_jit_comp.o.4.9.old

Signed-off-by: Joe Perches
Tested-by: Alexei Starovoitov
Signed-off-by: David S. Miller
---
 arch/x86/net/bpf_jit_comp.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 3f627345d51c..09e2ceaf13cb 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -135,11 +135,11 @@ static const int reg2hex[] = {
  */
 static inline bool is_ereg(u32 reg)
 {
-	if (reg == BPF_REG_5 || reg == AUX_REG ||
-	    (reg >= BPF_REG_7 && reg <= BPF_REG_9))
-		return true;
-	else
-		return false;
+	return (1 << reg) & (BIT(BPF_REG_5) |
+			     BIT(AUX_REG) |
+			     BIT(BPF_REG_7) |
+			     BIT(BPF_REG_8) |
+			     BIT(BPF_REG_9));
 }
 
 /* add modifiers if 'reg' maps to x64 registers r8..r15 */
-- cgit v1.2.3

From 5cccc702fd54e5c3dc5ee16a129770aae79ae60b Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Thu, 4 Dec 2014 17:01:24 -0800
Subject: x86: bpf_jit_comp: Remove inline from static function definitions

Let the compiler decide instead.

No change in object size x86-64 -O2 no profiling

Signed-off-by: Joe Perches
Suggested-by: Eric Dumazet
Acked-by: Alexei Starovoitov
Signed-off-by: David S. Miller
---
 arch/x86/net/bpf_jit_comp.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 09e2ceaf13cb..626e01377a01 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -24,7 +24,7 @@ extern u8 sk_load_byte_positive_offset[];
 extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
 extern u8 sk_load_byte_negative_offset[];
 
-static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
+static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 {
 	if (len == 1)
 		*ptr = bytes;
@@ -52,12 +52,12 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 #define EMIT4_off32(b1, b2, b3, b4, off) \
 	do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
 
-static inline bool is_imm8(int value)
+static bool is_imm8(int value)
 {
 	return value <= 127 && value >= -128;
 }
 
-static inline bool is_simm32(s64 value)
+static bool is_simm32(s64 value)
 {
 	return value == (s64) (s32) value;
 }
@@ -94,7 +94,7 @@ static int bpf_size_to_x86_bytes(int bpf_size)
 #define X86_JGE 0x7D
 #define X86_JG  0x7F
 
-static inline void bpf_flush_icache(void *start, void *end)
+static void bpf_flush_icache(void *start, void *end)
 {
 	mm_segment_t old_fs = get_fs();
 
@@ -133,7 +133,7 @@ static const int reg2hex[] = {
  * which need extra byte of encoding.
  * rax,rcx,...,rbp have simpler encoding
  */
-static inline bool is_ereg(u32 reg)
+static bool is_ereg(u32 reg)
 {
 	return (1 << reg) & (BIT(BPF_REG_5) |
 			     BIT(AUX_REG) |
@@ -143,14 +143,14 @@ static inline bool is_ereg(u32 reg)
 }
 
 /* add modifiers if 'reg' maps to x64 registers r8..r15 */
-static inline u8 add_1mod(u8 byte, u32 reg)
+static u8 add_1mod(u8 byte, u32 reg)
 {
 	if (is_ereg(reg))
 		byte |= 1;
 	return byte;
 }
 
-static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
+static u8 add_2mod(u8 byte, u32 r1, u32 r2)
 {
 	if (is_ereg(r1))
 		byte |= 1;
@@ -160,13 +160,13 @@ static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
 }
 
 /* encode 'dst_reg' register into x64 opcode 'byte' */
-static inline u8 add_1reg(u8 byte, u32 dst_reg)
+static u8 add_1reg(u8 byte, u32 dst_reg)
 {
 	return byte + reg2hex[dst_reg];
 }
 
 /* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
-static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
+static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
 {
 	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
 }
-- cgit v1.2.3

From 0cb6c969ed9de43687abdfc63714b6fe4385d2fc Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Wed, 10 Dec 2014 16:33:12 +0100
Subject: net, lib: kill arch_fast_hash library bits

As there are now no remaining users of arch_fast_hash(), lets kill
it entirely.

This basically reverts commit 71ae8aac3e19 ("lib: introduce arch
optimized hash library") and follow-up work, that is f.e., commit
237217546d44 ("lib: hash: follow-up fixups for arch hash"), commit
e3fec2f74f7f ("lib: Add missing arch generic-y entries for
asm-generic/hash.h") and last but not least commit 6a02652df511
("perf tools: Fix include for non x86 architectures").

Cc: Francesco Fusco
Cc: Thomas Graf
Cc: Arnaldo Carvalho de Melo
Signed-off-by: Daniel Borkmann
Signed-off-by: David S. Miller
---
 arch/x86/include/asm/hash.h |  7 ----
 arch/x86/lib/Makefile       |  2 +-
 arch/x86/lib/hash.c         | 92 ---------------------------------------------
 3 files changed, 1 insertion(+), 100 deletions(-)
 delete mode 100644 arch/x86/include/asm/hash.h
 delete mode 100644 arch/x86/lib/hash.c

(limited to 'arch/x86')

diff --git a/arch/x86/include/asm/hash.h b/arch/x86/include/asm/hash.h
deleted file mode 100644
index e8c58f88b1d4..000000000000
--- a/arch/x86/include/asm/hash.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _ASM_X86_HASH_H
-#define _ASM_X86_HASH_H
-
-struct fast_hash_ops;
-extern void setup_arch_fast_hash(struct fast_hash_ops *ops);
-
-#endif /* _ASM_X86_HASH_H */
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index db92793b7e23..1530afb07c85 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -23,7 +23,7 @@ lib-y += memcpy_$(BITS).o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
 
-obj-y += msr.o msr-reg.o msr-reg-export.o hash.o
+obj-y += msr.o msr-reg.o msr-reg-export.o
 
 ifeq ($(CONFIG_X86_32),y)
         obj-y += atomic64_32.o
diff --git a/arch/x86/lib/hash.c b/arch/x86/lib/hash.c
deleted file mode 100644
index ff4fa51a5b1f..000000000000
--- a/arch/x86/lib/hash.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Some portions derived from code covered by the following notice:
- *
- * Copyright (c) 2010-2013 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include
-#include
-
-#include
-#include
-#include
-
-static inline u32 crc32_u32(u32 crc, u32 val)
-{
-#ifdef CONFIG_AS_CRC32
-	asm ("crc32l %1,%0\n" : "+r" (crc) : "rm" (val));
-#else
-	asm (".byte 0xf2, 0x0f, 0x38, 0xf1, 0xc1" : "+a" (crc) : "c" (val));
-#endif
-	return crc;
-}
-
-static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed)
-{
-	const u32 *p32 = (const u32 *) data;
-	u32 i, tmp = 0;
-
-	for (i = 0; i < len / 4; i++)
-		seed = crc32_u32(seed, *p32++);
-
-	switch (len & 3) {
-	case 3:
-		tmp |= *((const u8 *) p32 + 2) << 16;
-		/* fallthrough */
-	case 2:
-		tmp |= *((const u8 *) p32 + 1) << 8;
-		/* fallthrough */
-	case 1:
-		tmp |= *((const u8 *) p32);
-		seed = crc32_u32(seed, tmp);
-		break;
-	}
-
-	return seed;
-}
-
-static u32 intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed)
-{
-	const u32 *p32 = (const u32 *) data;
-	u32 i;
-
-	for (i = 0; i < len; i++)
-		seed = crc32_u32(seed, *p32++);
-
-	return seed;
-}
-
-void __init setup_arch_fast_hash(struct fast_hash_ops *ops)
-{
-	if (cpu_has_xmm4_2) {
-		ops->hash = intel_crc4_2_hash;
-		ops->hash2 = intel_crc4_2_hash2;
-	}
-}
-- cgit v1.2.3