summaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorJakub Kicinski <kuba@kernel.org>2025-10-29 18:32:23 -0700
committerJakub Kicinski <kuba@kernel.org>2025-10-29 18:32:24 -0700
commit0dd1be4fe06a1f3a14a517446b49afaa6ac5fa94 (patch)
tree32f105d8c471e8cbf409d141bed45fd81b1ec5ad /include
parente98cda764aa9c27f6810d08bd7bf2e8071535990 (diff)
parent426e9da3b28404b1edcbae401231fb378150d99d (diff)
Merge branch 'tls-introduce-and-use-rx-async-resync-request-cancel-function'
Tariq Toukan says: ==================== tls: Introduce and use RX async resync request cancel function This series by Shahar introduces an RX async resync request cancel function in the tls module, and uses it in the mlx5e driver. For a device-offloaded TLS RX connection, the TLS module increments rcd_delta each time a new TLS record is received, tracking the distance from the original resync request. In the meantime, the device is queried and is expected to respond, asynchronously. However, if the device response is delayed or fails (e.g., due to an unstable connection and the device getting out of tracking, hardware errors, resource exhaustion, etc.), the TLS module keeps logging and incrementing rcd_delta, which can lead to a WARN() when rcd_delta exceeds the threshold. This series improves this code area by canceling the resync request when spotting an issue with the device response. ==================== Link: https://patch.msgid.link/1761508983-937977-1-git-send-email-tariqt@nvidia.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'include')
-rw-r--r--include/net/tls.h25
1 file changed, 13 insertions, 12 deletions
diff --git a/include/net/tls.h b/include/net/tls.h
index 857340338b69..c7bcdb3afad7 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -451,25 +451,26 @@ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
-tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
+tls_offload_rx_resync_async_request_start(struct tls_offload_resync_async *resync_async,
+ __be32 seq, u16 len)
{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
-
- atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
+ atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) |
((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
- rx_ctx->resync_async->loglen = 0;
- rx_ctx->resync_async->rcd_delta = 0;
+ resync_async->loglen = 0;
+ resync_async->rcd_delta = 0;
}
static inline void
-tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
+tls_offload_rx_resync_async_request_end(struct tls_offload_resync_async *resync_async,
+ __be32 seq)
{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+ atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+}
- atomic64_set(&rx_ctx->resync_async->req,
- ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+static inline void
+tls_offload_rx_resync_async_request_cancel(struct tls_offload_resync_async *resync_async)
+{
+ atomic64_set(&resync_async->req, 0);
}
static inline void