author		David S. Miller <davem@davemloft.net>	2017-06-12 10:01:30 -0400
committer	David S. Miller <davem@davemloft.net>	2017-06-12 10:01:30 -0400
commit		073cf9e20c333ab29744717a23f9e43ec7512a20 (patch)
tree		0378a13224c5045bedeee8a4d0b57e579e45de46 /include/linux
parent		78d6102256fe46fcd0c78798a9391cf1f112f117 (diff)
parent		b65ac44674dd9c9c1ad11ebb3ec9e8882990bdb4 (diff)
Merge branch 'udp-reduce-cache-pressure'
Paolo Abeni says:

====================
udp: reduce cache pressure

In the most common use case, many skb fields are not used by recvmsg(), and the few that are actually accessed lie on cold cachelines, leading to several cache misses per packet.

This patch series attempts to reduce such misses with different strategies:
* caching the interesting fields in the scratch space
* avoiding access to uninteresting fields altogether
* prefetching

Tested using the udp_sink program by Jesper [1] as the receiver, with an h/w l4 rx hash on the ingress nic, so that the number of ingress nic rx queues hit by the udp traffic could be controlled via ethtool -L. The udp_sink program was bound to the first idle cpu to get more stable numbers.

On a single numa node receiver:

nic rx queues    vanilla      patched kernel    delta
1                1850 kpps    1850 kpps         0%
2                2370 kpps    2700 kpps         13.9%
16               2000 kpps    2220 kpps         11%

[1] https://github.com/netoptimizer/network-testing/blob/master/src/udp_sink.c

v1 -> v2:
 - replaced secpath_reset() with skb_release_head_state()
 - changed udp_dev_scratch field types to the u{32,16} variants, replaced the bitfield with a bool

v2 -> v3:
 - no changes, tested against apachebench for performance regressions
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
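The first strategy above, caching the interesting fields in the scratch space, can be illustrated outside the kernel. The sketch below is a minimal, self-contained C analogy, not the series' actual code: struct pkt and struct pkt_scratch are hypothetical stand-ins for struct sk_buff and the series' udp_dev_scratch. The idea is to copy the few fields the receive path needs into a small, already-hot struct once at enqueue time, so dequeue never has to touch the large cold struct again.

/* Illustrative sketch only; all names here are hypothetical. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pkt {			/* stand-in for a large struct sk_buff */
	char cold_header[192];	/* fields recvmsg() never touches */
	uint32_t truesize;	/* ...and the few fields it does */
	uint16_t len;
	bool is_linear;
};

struct pkt_scratch {		/* stand-in for udp_dev_scratch */
	uint32_t truesize;
	uint16_t len;
	bool is_linear;
};

/* Done once at enqueue time, while the packet fields are still hot. */
static void scratch_set(const struct pkt *p, struct pkt_scratch *s)
{
	s->truesize = p->truesize;
	s->len = p->len;
	s->is_linear = p->is_linear;
}

/* At dequeue time the receive path reads only the small scratch copy
 * and never dereferences the cold struct pkt again. */
static uint16_t recv_len(const struct pkt_scratch *s)
{
	return s->len;
}

int main(void)
{
	struct pkt p = { .truesize = 2048, .len = 1472, .is_linear = true };
	struct pkt_scratch s;

	scratch_set(&p, &s);
	printf("cached len: %u\n", (unsigned)recv_len(&s));
	return 0;
}

In the series itself, judging by the udp_dev_scratch name, the cached copy presumably lives in space the skb already owns, so no extra allocation or cacheline is needed for it.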
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/skbuff.h	15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d460a4cbda1c..d66d4feaac86 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -867,10 +867,25 @@ static inline unsigned int skb_napi_id(const struct sk_buff *skb)
#endif
}
+/* decrement the reference count and return true if we can free the skb */
+static inline bool skb_unref(struct sk_buff *skb)
+{
+ if (unlikely(!skb))
+ return false;
+ if (likely(atomic_read(&skb->users) == 1))
+ smp_rmb();
+ else if (likely(!atomic_dec_and_test(&skb->users)))
+ return false;
+
+ return true;
+}
+
+void skb_release_head_state(struct sk_buff *skb);
void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
+void consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;
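For context on the skb_unref() helper added above: when skb->users reads as 1 the caller is the sole owner, so the atomic read-modify-write is skipped entirely and only an smp_rmb() is issued to order this CPU's subsequent reads against the release operations of whichever CPU dropped the previous reference; otherwise it falls back to atomic_dec_and_test(). A hedged sketch of a caller follows, assuming the conventional structure of a free path; it is illustrative and not confirmed by this diff.

void kfree_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;		/* other references remain: not ours to free */

	/* Last reference gone: tear down state and free the memory. */
	__kfree_skb(skb);
}

The newly declared consume_stateless_skb() presumably follows the same pattern for skbs whose head state was already torn down via skb_release_head_state(), which is consistent with the cover letter's v1 -> v2 note and lets the UDP receive path skip that work at consume time.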