summary refs log tree commit diff
path: root/include/linux
diff options
context:
space:
mode:
author    Jakub Kicinski <kuba@kernel.org>  2021-11-25 21:03:33 -0800
committer Jakub Kicinski <kuba@kernel.org>  2021-11-25 21:03:33 -0800
commit    35bf8c86eeb8ae609f61c43aeab3b530fedcf1b4 (patch)
tree      7f17a01eb6164d66c713a0d18fde1ffa231707e4 /include/linux
parent    703319094c9c2bf34f65d3496ccb350149fdd14b (diff)
parent    29c3002644bdd653f6ec6407d25135d0a4f7cefb (diff)
Merge branch 'net-small-csum-optimizations'

Eric Dumazet says:

====================
net: small csum optimizations

After recent x86 csum_partial() optimizations, we can more easily see
in kernel profiles costs of add/adc operations that could be avoided,
by feeding a non zero third argument to csum_partial()
====================

Link: https://lore.kernel.org/r/20211124202446.2917972-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/skbuff.h  |  6
1 file changed, 5 insertions, 1 deletion
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index eba256af64a5..eae4bd3237a4 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3485,7 +3485,11 @@ __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
static inline void skb_postpull_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
- __skb_postpull_rcsum(skb, start, len, 0);
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = ~csum_partial(start, len, ~skb->csum);
+ else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_start_offset(skb) < 0)
+ skb->ip_summed = CHECKSUM_NONE;
}
static __always_inline void