| field | value | date |
|---|---|---|
| author | David S. Miller <davem@davemloft.net> | 2019-06-11 12:22:27 -0700 |
| committer | David S. Miller <davem@davemloft.net> | 2019-06-11 12:22:27 -0700 |
| commit | 758a0a4d60ab8a5bfed1e8365c6faa00d8b8dbd5 (patch) | |
| tree | 540add3d2e40be7263d64cde02b30a0ca8dd7cbe /include | |
| parent | 46088059aaa0ec2143a1005bfc58d2a7c741b93f (diff) | |
| parent | 9ed431c1d7cf8c394aa7562ca6b84f3eeb8c78a8 (diff) | |
Merge branch 'tls-add-support-for-kernel-driven-resync-and-nfp-RX-offload'
Jakub Kicinski says:
====================
tls: add support for kernel-driven resync and nfp RX offload
This series adds TLS RX offload for the NFP and completes the offload
by providing resync strategies. When the TLS data stream loses segments
or experiences reordering, the NIC can no longer perform inline offload.
Resyncs provide information about the placement of records in the
stream so that offload can resume.
Existing TLS resync mechanisms are not a great fit for the NFP.
In particular, TX resync is hard to implement for packet-centric
NICs. This patchset adds the ability to perform TX resync in a way
similar to how the initial sync is done: by calling down to the
driver when a new record is created after the driver has indicated
that sync was lost (a driver-side sketch follows the quoted message).
Similarly, on the RX side, we try to wait for a gap in the stream
and send record information for the next record. This works very
well for RPC workloads, which are the primary focus at this time
(an RX sketch follows the diff below).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
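To make the TX flow concrete, here is a minimal driver-side sketch. It is hypothetical, not code from this series: the `mydrv_*` names are invented, and only `tls_offload_tx_resync_request()` and `tls_offload_tx_resync_pending()` come from the header changes shown below.

```c
#include <net/tls.h>

/* Hypothetical driver TX path; mydrv_* names are illustrative only. */

static void mydrv_tx_lost_sync(struct sock *sk)
{
	/* The NIC saw a TCP segment it could not handle inline (loss or
	 * reorder), so its crypto state is stale.  Schedule a TX resync:
	 * the TLS core will call ->tls_dev_resync() with
	 * TLS_OFFLOAD_CTX_DIR_TX when the next record is created.
	 */
	tls_offload_tx_resync_request(sk);
}

static bool mydrv_tx_can_offload(struct sock *sk)
{
	/* While the resync is pending, the driver's own sequence
	 * tracking cannot be trusted; send via the software fallback
	 * path instead.
	 */
	return !tls_offload_tx_resync_pending(sk);
}
```

The `WARN_ON(test_and_set_bit(...))` in `tls_offload_tx_resync_request()` suggests a driver should request a resync only once per loss of sync, then stay on the fallback path until the pending flag clears.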
Diffstat (limited to 'include')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | include/net/tls.h | 63 |

1 file changed, 59 insertions, 4 deletions
```diff
diff --git a/include/net/tls.h b/include/net/tls.h
index 3ecf45adb707..63e473420b00 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -62,6 +62,7 @@
 #define TLS_DEVICE_NAME_MAX	32
 
 #define MAX_IV_SIZE		16
+#define TLS_MAX_REC_SEQ_SIZE	8
 
 /* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes.
  *
@@ -211,6 +212,11 @@ struct tls_offload_context_tx {
 
 enum tls_context_flags {
 	TLS_RX_SYNC_RUNNING = 0,
+	/* Unlike RX where resync is driven entirely by the core in TX only
+	 * the driver knows when things went out of sync, so we need the flag
+	 * to be atomic.
+	 */
+	TLS_TX_SYNC_SCHED = 1,
 };
 
 struct cipher_context {
@@ -298,14 +304,38 @@ struct tlsdev_ops {
 	void (*tls_dev_del)(struct net_device *netdev,
 			    struct tls_context *ctx,
 			    enum tls_offload_ctx_dir direction);
-	void (*tls_dev_resync_rx)(struct net_device *netdev,
-				  struct sock *sk, u32 seq, u64 rcd_sn);
+	void (*tls_dev_resync)(struct net_device *netdev,
+			       struct sock *sk, u32 seq, u8 *rcd_sn,
+			       enum tls_offload_ctx_dir direction);
+};
+
+enum tls_offload_sync_type {
+	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
+	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
 };
 
+#define TLS_DEVICE_RESYNC_NH_START_IVAL	2
+#define TLS_DEVICE_RESYNC_NH_MAX_IVAL	128
+
 struct tls_offload_context_rx {
 	/* sw must be the first member of tls_offload_context_rx */
 	struct tls_sw_context_rx sw;
-	atomic64_t resync_req;
+	enum tls_offload_sync_type resync_type;
+	/* this member is set regardless of resync_type, to avoid branches */
+	u8 resync_nh_reset:1;
+	/* CORE_NEXT_HINT-only member, but use the hole here */
+	u8 resync_nh_do_now:1;
+	union {
+		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
+		struct {
+			atomic64_t resync_req;
+		};
+		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
+		struct {
+			u32 decrypted_failed;
+			u32 decrypted_tgt;
+		} resync_nh;
+	};
 	u8 driver_state[] __aligned(8);
 	/* The TLS layer reserves room for driver specific state
 	 * Currently the belief is that there is not enough
@@ -586,6 +616,31 @@ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
 	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);
 }
 
+static inline void
+tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
+}
+
+static inline void tls_offload_tx_resync_request(struct sock *sk)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
+}
+
+/* Driver's seq tracking has to be disabled until resync succeeded */
+static inline bool tls_offload_tx_resync_pending(struct sock *sk)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	bool ret;
+
+	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
+	smp_mb__after_atomic();
+	return ret;
+}
 
 int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
 		      unsigned char *record_type);
@@ -607,6 +662,6 @@ int tls_sw_fallback_init(struct sock *sk,
 
 int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
 void tls_device_offload_cleanup_rx(struct sock *sk);
-void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);
+void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
 
 #endif /* _TLS_OFFLOAD_H */
```
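And a matching RX-side sketch, again hypothetical: the `mydrv_*` names are invented, and the `tls_dev_add` signature is assumed to match the existing member of `struct tlsdev_ops` (it is not shown in the diff above). Only `tls_offload_rx_resync_set_type()`, `TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT`, and the new `tls_dev_resync` prototype come from this header.

```c
#include <net/tls.h>

/* Hypothetical ->tls_dev_add(): program keys, then pick the resync type. */
static int mydrv_tls_add(struct net_device *netdev, struct sock *sk,
			 enum tls_offload_ctx_dir direction,
			 struct tls_crypto_info *crypto_info,
			 u32 start_offload_tcp_sn)
{
	/* ... push crypto_info and start_offload_tcp_sn to the NIC ... */

	if (direction == TLS_OFFLOAD_CTX_DIR_RX)
		/* Opt in to the kernel-driven strategy: after decryption
		 * failures the core waits for a gap between records and
		 * reports the start of the next record to the driver.
		 */
		tls_offload_rx_resync_set_type(sk,
				TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
	return 0;
}

/* The unified resync callback that replaces tls_dev_resync_rx(). */
static void mydrv_tls_resync(struct net_device *netdev, struct sock *sk,
			     u32 seq, u8 *rcd_sn,
			     enum tls_offload_ctx_dir direction)
{
	/* seq is the TCP sequence number where the record starts; rcd_sn
	 * holds up to TLS_MAX_REC_SEQ_SIZE bytes of the record sequence
	 * number.  Reprogram the NIC so inline crypto can resume.
	 */
}

static const struct tlsdev_ops mydrv_tlsdev_ops = {
	.tls_dev_add	= mydrv_tls_add,
	/* .tls_dev_del elided from this sketch */
	.tls_dev_resync	= mydrv_tls_resync,
};
```

A driver that keeps the default `TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ` behavior instead requests RX resyncs itself via `tls_offload_rx_resync_request()`, as before this series.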
