author		Jakub Kicinski <kuba@kernel.org>	2025-02-19 19:05:30 -0800
committer	Jakub Kicinski <kuba@kernel.org>	2025-02-19 19:05:31 -0800
commit		22af030f01f9a0fe7fde73970df6632f7d9c47fd (patch)
tree		36219ec22b92aa151525292a16cc1ef06d4c3b3c /net/mptcp/subflow.c
parent		9a6c2b2bdd5ed46f3ab364c975ea7b772b29aec2 (diff)
parent		e0ca4057e0ecd4b10f27892fe6f1ac2a7fd25ab4 (diff)
Merge branch 'mptcp-rx-path-refactor'
Matthieu Baerts says:

====================
mptcp: rx path refactor

Paolo worked on this RX path refactor for two main reasons:

- Currently, the MPTCP RX path introduces quite a bit of 'exceptional'
  accounting/locking processing WRT plain TCP, adding to the
  implementation complexity in a miserable way.

- The performance gap WRT plain TCP for single-subflow connections is
  quite measurable.

The present refactor addresses both of the above items: most of the
additional complexity is dropped, and single-stream performance
increases measurably, from 55 Gbps to 71 Gbps in Paolo's loopback test.
As a reference, plain TCP was around 84 Gbps on the same host.

The above comes at a price: the patches are invasive, even in subtle
ways.

Note: patch 5/7 removes the sk_forward_alloc_get() helper, which caused
some trivial modifications in different places in the net tree: sockets,
IPv4, sched. That's why a few more people have been Cc'ed here. Feel
free to only look at patch 5/7.
====================

Link: https://patch.msgid.link/20250218-net-next-mptcp-rx-path-refactor-v1-0-4a47d90d7998@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
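For context on the note above: sk_forward_alloc_get() was a small accessor in
include/net/sock.h that existed only so MPTCP could report forward-allocated
memory through a protocol callback instead of the plain socket field. A rough
sketch of the removed helper, from memory and not taken from this page's diff:

static inline int sk_forward_alloc_get(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP)
	/* MPTCP kept rx forward allocations in msk-specific state, so it
	 * had to answer through a dedicated callback
	 */
	if (sk->sk_prot->forward_alloc_get)
		return sk->sk_prot->forward_alloc_get(sk);
#endif
	return READ_ONCE(sk->sk_forward_alloc);
}

Once the refactor keeps all rx accounting on the msk socket itself,
sk_forward_alloc is accurate for MPTCP sockets too, and callers in
sockets/IPv4/sched can read the field directly; hence the cross-tree
fallout mentioned above.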
Diffstat (limited to 'net/mptcp/subflow.c')
-rw-r--r--	net/mptcp/subflow.c	36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index fd021cf8286e..d2caffa56bdd 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -802,9 +802,6 @@ void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
 	subflow_set_remote_key(msk, subflow, mp_opt);
 	WRITE_ONCE(subflow->fully_established, true);
 	WRITE_ONCE(msk->fully_established, true);
-
-	if (subflow->is_mptfo)
-		__mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
 }
 
 static struct sock *subflow_syn_recv_sock(const struct sock *sk,
@@ -1271,7 +1268,12 @@ out:
 	subflow->map_valid = 0;
 }
 
-/* sched mptcp worker to remove the subflow if no more data is pending */
+static bool subflow_is_done(const struct sock *sk)
+{
+	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
+}
+
+/* sched mptcp worker for subflow cleanup if no more data is pending */
 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
 {
 	struct sock *sk = (struct sock *)msk;
@@ -1281,8 +1283,18 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss
 		    inet_sk_state_load(sk) != TCP_ESTABLISHED)))
 		return;
 
-	if (skb_queue_empty(&ssk->sk_receive_queue) &&
-	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+	if (!skb_queue_empty(&ssk->sk_receive_queue))
+		return;
+
+	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+		mptcp_schedule_work(sk);
+
+	/* when the fallback subflow closes the rx side, trigger a 'dummy'
+	 * ingress data fin, so that the msk state will follow along
+	 */
+	if (__mptcp_check_fallback(msk) && subflow_is_done(ssk) &&
+	    msk->first == ssk &&
+	    mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
 		mptcp_schedule_work(sk);
 }
 
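Pieced together from the context and '+' lines of the two hunks above, the
consolidated helper reads roughly as follows after this change (a readability
reconstruction of the diff, with the early-return guard elided):

static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;

	/* (early return while the subflow is still active, elided) */

	/* don't tear the subflow down while data is still queued on it */
	if (!skb_queue_empty(&ssk->sk_receive_queue))
		return;

	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
		mptcp_schedule_work(sk);

	/* when the fallback subflow closes the rx side, trigger a 'dummy'
	 * ingress data fin, so that the msk state will follow along
	 */
	if (__mptcp_check_fallback(msk) && subflow_is_done(ssk) &&
	    msk->first == ssk &&
	    mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
		mptcp_schedule_work(sk);
}

The net effect: the fallback 'dummy' data-fin handling moves from
subflow_state_change() (see the last hunk below) into this single choke point,
so it now also runs on the receive-queue-drained path, not only on subflow
state changes.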
@@ -1842,11 +1854,6 @@ static void __subflow_state_change(struct sock *sk)
 	rcu_read_unlock();
 }
 
-static bool subflow_is_done(const struct sock *sk)
-{
-	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
-}
-
 static void subflow_state_change(struct sock *sk)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
@@ -1873,13 +1880,6 @@ static void subflow_state_change(struct sock *sk)
 		subflow_error_report(sk);
 
 	subflow_sched_work_if_closed(mptcp_sk(parent), sk);
-
-	/* when the fallback subflow closes the rx side, trigger a 'dummy'
-	 * ingress data fin, so that the msk state will follow along
-	 */
-	if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
-	    mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
-		mptcp_schedule_work(parent);
 }
 
 void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
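For the bigger picture: MPTCP_WORK_CLOSE_SUBFLOW is consumed by the msk worker
in net/mptcp/protocol.c, which performs the actual subflow teardown. From
memory, and not part of this diff, the worker-side handling looks roughly like:

	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
		__mptcp_close_subflow(msk);

so subflow_sched_work_if_closed() only needs to set the flag and schedule the
work, which is why the comment in the second hunk now says 'subflow cleanup'
rather than 'remove the subflow'.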