author	David S. Miller <davem@davemloft.net>	2016-02-12 05:52:41 -0500
committer	David S. Miller <davem@davemloft.net>	2016-02-12 05:52:41 -0500
commit	667f00630ebefc4d73aa105c6ab254e4aec867f8 (patch)
tree	e8898fbf9bcbf38de21be4e89b93eed0816a6c52 /include/linux
parent	e51271d4ce7b229f5c02903e3c44bf92c0dbef6b (diff)
parent	e8ae7b000e64cf76283c72cae5e3ecd246618ef4 (diff)
Merge branch 'local-checksum-offload'
Edward Cree says:

====================
Local Checksum Offload

Re-tested VxLAN; everything else is unchanged from v4.

Changes from v4:
 * Rebased series to fix conflicts with vxlan/vxlan6 merge.

Changes from v3:
 * Fixed inverted checksum values introduced in v3.
 * Don't mangle zero checksums in GRE.
 * Clear skb->encapsulation in iptunnel_handle_offloads when not using
   CHECKSUM_PARTIAL, lest drivers incorrectly interpret that as a request
   for inner checksum offload.

Changes from v2:
 * Added support for IPv4 GRE.
 * Split out 'always set up for checksum offload' into its own patch.
 * Removed csum_help from iptunnel_handle_offloads.
 * Rewrote LCO callers to only fold once.
 * Simplified nocheck handling.

Changes from v1:
 * Enabled support in more encapsulation protocols. I think it now covers
   everything except GRE.
 * Wrote up some documentation covering TX checksum offload, LCO and RCO.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
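Background on the arithmetic, as a sketch (the series' own Documentation/networking/checksum-offloads.txt is the authoritative write-up): with CHECKSUM_PARTIAL, the stack seeds the inner checksum field with the inner pseudo-header sum P, and the device later overwrites that field with ~(P + D), where D is the ones-complement sum of the rest of the inner packet. Since D + ~(P + D) == ~P in ones-complement arithmetic, the eventual sum of the whole inner region is known before the hardware runs, which is what lets the stack compute the outer checksum now. A toy userspace C demonstration of the identity, with made-up values for P and D:

/* Toy demonstration (plain userspace C, not kernel code) of the
 * ones-complement identity LCO relies on: D + ~(P + D) == ~P.
 */
#include <stdio.h>
#include <stdint.h>

/* 16-bit ones-complement addition (end-around carry) */
static uint16_t csum_add16(uint16_t a, uint16_t b)
{
	uint32_t s = (uint32_t)a + b;
	return (uint16_t)((s & 0xffff) + (s >> 16));
}

int main(void)
{
	uint16_t P = 0x1234;	/* pseudo-header seed in the csum field */
	uint16_t D = 0xabcd;	/* sum of the rest of the inner packet */
	/* What the NIC will write into the checksum field */
	uint16_t hw = (uint16_t)~csum_add16(P, D);

	/* After offload the inner region sums to D + ~(P + D) == ~P,
	 * which is why lco_csum() can start from the complement of the
	 * value currently sitting in the checksum field.
	 */
	printf("D + ~(P+D) = 0x%04x, ~P = 0x%04x\n",
	       csum_add16(D, hw), (uint16_t)~P);
	return 0;
}

Both printed values come out equal (0xedcb for these inputs), modulo the usual caveat that ones-complement arithmetic has two representations of zero.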
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/skbuff.h	26
1 file changed, 26 insertions(+), 0 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6ec86f1a2ed9..39206751463e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3702,5 +3702,31 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
 	return hdr_len + skb_gso_transport_seglen(skb);
 }
 
+/* Local Checksum Offload.
+ * Compute outer checksum based on the assumption that the
+ * inner checksum will be offloaded later.
+ * See Documentation/networking/checksum-offloads.txt for
+ * explanation of how this works.
+ * Fill in outer checksum adjustment (e.g. with sum of outer
+ * pseudo-header) before calling.
+ * Also ensure that inner checksum is in linear data area.
+ */
+static inline __wsum lco_csum(struct sk_buff *skb)
+{
+	char *inner_csum_field;
+	__wsum csum;
+
+	/* Start with complement of inner checksum adjustment */
+	inner_csum_field = skb->data + skb_checksum_start_offset(skb) +
+			   skb->csum_offset;
+	csum = ~csum_unfold(*(__force __sum16 *)inner_csum_field);
+	/* Add in checksum of our headers (incl. outer checksum
+	 * adjustment filled in by caller)
+	 */
+	csum = skb_checksum(skb, 0, skb_checksum_start_offset(skb), csum);
+	/* The result is the checksum from skb->data to end of packet */
+	return csum;
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
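
For a sense of how a caller uses lco_csum(), here is a hedged sketch modelled on udp_set_csum() in net/ipv4/udp.c as modified by this series (paraphrased from memory, not verbatim from this diff): the encapsulating protocol zeroes its outer checksum field so that field contributes nothing to the sum, folds lco_csum() into the outer pseudo-header checksum exactly once, and avoids emitting the all-zero "no checksum" value.

/* Hypothetical caller for an outer IPv4 UDP header; the name
 * example_udp_lco is made up for illustration.  udp_hdr(),
 * udp_v4_check() and CSUM_MANGLED_0 are existing kernel helpers.
 */
static void example_udp_lco(struct sk_buff *skb, __be32 saddr, __be32 daddr,
			    int len)
{
	struct udphdr *uh = udp_hdr(skb);

	/* Zero the outer checksum field first: lco_csum() sums the outer
	 * headers including this field, and the outer pseudo-header
	 * adjustment is supplied via udp_v4_check() below instead.
	 */
	uh->check = 0;
	uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;	/* 0 means "no checksum" for UDP */
}

Note the single fold: lco_csum() returns an unfolded __wsum, and udp_v4_check() both adds the pseudo-header and folds the result, which is the "only fold once" rewrite mentioned in the changes from v2.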