asm-generic: Improve csum_fold
This csum_fold implementation, introduced into arch/arc by Vineet Gupta, is better than the default implementation on at least arc, x86, and riscv. Using GCC trunk and compiling a non-inlined version, this implementation has 41.6667% fewer instructions on riscv64 and 25% fewer on x86-64 with -O3 optimization. Most implementations override this default in asm, but this should be more performant than all of those other implementations except for arm, which has barrel shifting, and sparc32, which has a carry flag.

Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
Reviewed-by: David Laight <david.laight@aculab.com>
Link: https://lore.kernel.org/r/20240108-optimize_checksum-v15-1-1c50de5f2167@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
commit 1e7196fa5b
parent b85ea95d08
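For context on what the diff below changes, here is a minimal userspace sketch of the two folding strategies; the function names (csum_fold_shift, csum_fold_ror) and the local ror32_demo helper are illustrative assumptions, not kernel code:

#include <stdint.h>

/* Userspace stand-ins; the kernel versions operate on __wsum/__sum16. */
static inline uint32_t ror32_demo(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));	/* rotate right by n, 0 < n < 32 */
}

/* Old generic fold: two end-around-carry additions, then invert. */
static inline uint16_t csum_fold_shift(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the high half into the low half */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb the carry that may result */
	return (uint16_t)~sum;
}

/*
 * New rotate-based fold.  Writing sum = a*2^16 + b, sum + ror32(sum, 16)
 * equals (a + b)*2^16 + (a + b) modulo 2^32, and ~sum - ror32(sum, 16) is
 * the same as ~(sum + ror32(sum, 16)) modulo 2^32, so its upper halfword is
 * already the ones'-complement of the end-around-carry sum of a and b.
 */
static inline uint16_t csum_fold_ror(uint32_t sum)
{
	return (uint16_t)((~sum - ror32_demo(sum, 16)) >> 16);
}

Both reduce a 32-bit partial sum to a 16-bit ones'-complement checksum; the rotate form needs no second dependent add-and-shift round, which is presumably where the instruction-count savings on riscv64 and x86-64 come from.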
include/asm-generic/checksum.h
@@ -2,6 +2,8 @@
 #ifndef __ASM_GENERIC_CHECKSUM_H
 #define __ASM_GENERIC_CHECKSUM_H
 
+#include <linux/bitops.h>
+
 /*
  * computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit)
@@ -31,9 +33,7 @@ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 static inline __sum16 csum_fold(__wsum csum)
 {
 	u32 sum = (__force u32)csum;
-	sum = (sum & 0xffff) + (sum >> 16);
-	sum = (sum & 0xffff) + (sum >> 16);
-	return (__force __sum16)~sum;
+	return (__force __sum16)((~sum - ror32(sum, 16)) >> 16);
 }
 #endif
 
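As a quick sanity check (a throwaway userspace harness, assuming only a hosted C99 compiler; nothing here is kernel code), one could verify that the old and the new expression fold every sampled value identically:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Boundary values first, then a large pseudo-random sample. */
	const uint32_t fixed[] = { 0, 1, 0xffff, 0x10000, 0x0001ffff,
				   0xffff0000u, 0xfffffffeu, 0xffffffffu };
	const size_t nfixed = sizeof(fixed) / sizeof(fixed[0]);
	size_t i;

	for (i = 0; i < nfixed + (1u << 24); i++) {
		uint32_t sum = i < nfixed ? fixed[i]
					  : (uint32_t)rand() * 2654435761u ^ (uint32_t)i;
		uint32_t ror16 = (sum >> 16) | (sum << 16);

		/* old: two shift-and-add folds, then invert */
		uint32_t t = (sum & 0xffff) + (sum >> 16);
		t = (t & 0xffff) + (t >> 16);
		uint16_t old_fold = (uint16_t)~t;

		/* new: single rotate-based fold */
		uint16_t new_fold = (uint16_t)((~sum - ror16) >> 16);

		if (old_fold != new_fold) {
			printf("mismatch at 0x%08x: old 0x%04x, new 0x%04x\n",
			       sum, old_fold, new_fold);
			return 1;
		}
	}
	printf("old and new csum_fold agree on all sampled inputs\n");
	return 0;
}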