Diffstat (limited to 'net/tls/tls_device.c')
-rw-r--r-- | net/tls/tls_device.c | 44
1 files changed, 19 insertions, 25 deletions
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 14dedb24fa7b..e225c81e6b35 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -89,22 +89,6 @@ static void tls_device_gc_task(struct work_struct *work)
 	}
 }
 
-static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
-			      struct net_device *netdev)
-{
-	if (sk->sk_destruct != tls_device_sk_destruct) {
-		refcount_set(&ctx->refcount, 1);
-		dev_hold(netdev);
-		ctx->netdev = netdev;
-		spin_lock_irq(&tls_device_lock);
-		list_add_tail(&ctx->list, &tls_device_list);
-		spin_unlock_irq(&tls_device_lock);
-
-		ctx->sk_destruct = sk->sk_destruct;
-		sk->sk_destruct = tls_device_sk_destruct;
-	}
-}
-
 static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
 {
 	unsigned long flags;
@@ -199,7 +183,7 @@ static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
  * socket and no in-flight SKBs associated with this
  * socket, so it is safe to free all the resources.
  */
-void tls_device_sk_destruct(struct sock *sk)
+static void tls_device_sk_destruct(struct sock *sk)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
@@ -217,7 +201,6 @@ void tls_device_sk_destruct(struct sock *sk)
 	if (refcount_dec_and_test(&tls_ctx->refcount))
 		tls_device_queue_ctx_destruction(tls_ctx);
 }
-EXPORT_SYMBOL(tls_device_sk_destruct);
 
 void tls_device_free_resources_tx(struct sock *sk)
 {
@@ -584,7 +567,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 
 	rx_ctx = tls_offload_ctx_rx(tls_ctx);
 	resync_req = atomic64_read(&rx_ctx->resync_req);
-	req_seq = ntohl(resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
+	req_seq = (resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
 	is_req_pending = resync_req;
 
 	if (unlikely(is_req_pending) && req_seq == seq &&
@@ -699,6 +682,22 @@ int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
 		tls_device_reencrypt(sk, skb);
 }
 
+static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
+			      struct net_device *netdev)
+{
+	if (sk->sk_destruct != tls_device_sk_destruct) {
+		refcount_set(&ctx->refcount, 1);
+		dev_hold(netdev);
+		ctx->netdev = netdev;
+		spin_lock_irq(&tls_device_lock);
+		list_add_tail(&ctx->list, &tls_device_list);
+		spin_unlock_irq(&tls_device_lock);
+
+		ctx->sk_destruct = sk->sk_destruct;
+		sk->sk_destruct = tls_device_sk_destruct;
+	}
+}
+
 int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 {
 	u16 nonce_size, tag_size, iv_size, rec_seq_size;
@@ -882,8 +881,6 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
 	}
 
 	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
-		pr_err_ratelimited("%s: netdev %s with no TLS offload\n",
-				   __func__, netdev->name);
 		rc = -ENOTSUPP;
 		goto release_netdev;
 	}
@@ -911,11 +908,8 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
 	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
 					     &ctx->crypto_recv.info,
 					     tcp_sk(sk)->copied_seq);
-	if (rc) {
-		pr_err_ratelimited("%s: The netdev has refused to offload this socket\n",
-				   __func__);
+	if (rc)
 		goto free_sw_resources;
-	}
 
 	tls_device_attach(ctx, sk, netdev);
 	goto release_netdev;
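The relocated tls_device_attach() hunk above relies on destructor chaining: the socket's original sk_destruct callback is saved in the TLS context and the offload destructor is installed in its place, so teardown runs the offload cleanup first and then falls through to whatever destructor was there before. Below is a minimal, self-contained userspace sketch of that pattern; the names (struct conn, conn_attach, conn_offload_destruct) are hypothetical stand-ins for illustration only, not the kernel code itself.

#include <stdio.h>

/* Hypothetical connection object standing in for struct sock / tls_context. */
struct conn {
	void (*destruct)(struct conn *c);       /* like sk->sk_destruct    */
	void (*saved_destruct)(struct conn *c); /* like ctx->sk_destruct   */
};

/* Original destructor, in the role of the socket's default destructor. */
static void conn_default_destruct(struct conn *c)
{
	printf("default destruct\n");
}

/* Offload destructor, in the role of tls_device_sk_destruct: do the
 * offload-specific cleanup, then chain to the saved callback. */
static void conn_offload_destruct(struct conn *c)
{
	printf("release offload resources\n");
	if (c->saved_destruct)
		c->saved_destruct(c);
}

/* In the role of tls_device_attach: install the offload destructor only
 * once, remembering the previous one so it still runs at teardown. */
static void conn_attach(struct conn *c)
{
	if (c->destruct != conn_offload_destruct) {
		c->saved_destruct = c->destruct;
		c->destruct = conn_offload_destruct;
	}
}

int main(void)
{
	struct conn c = { .destruct = conn_default_destruct };

	conn_attach(&c);
	conn_attach(&c);  /* second call is a no-op, mirroring the sk_destruct check */
	c.destruct(&c);   /* teardown: offload cleanup, then the saved destructor */
	return 0;
}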