path: root/net/mptcp/subflow.c
author    Jakub Kicinski <kuba@kernel.org>  2020-11-16 10:46:10 -0800
committer Jakub Kicinski <kuba@kernel.org>  2020-11-16 10:46:11 -0800
commit    72308ecbf33b145641aba61071be31a85ebfd92c (patch)
tree      99955cee5d38f5e3f121272f68d174d3529e8c92 /net/mptcp/subflow.c
parent    c0a645a7f94409043b5b1d577590bee9b2ce5333 (diff)
parent    7ed90803a213736290bdcf971764ddb8ff3fa44f (diff)
Merge branch 'mptcp-improve-multiple-xmit-streams-support'
Paolo Abeni says:

====================
mptcp: improve multiple xmit streams support

This series improves MPTCP handling of multiple concurrent xmit streams.

The to-be-transmitted data is enqueued to a subflow only when the send
window is open, keeping the subflows' xmit queues shorter and allowing
for a faster switch-over.

The above requires more accurate msk socket state tracking and some
additional infrastructure to allow pushing the data pending in the msk
xmit queue as soon as the MPTCP-level send window opens (patches 6-10).

As a side effect, the MPTCP socket can enqueue data to subflows after
close() time, to completely spool the data sitting in the msk xmit
queue. Dealing with that requires some infrastructure and core TCP
changes (patches 1-5).

Finally, patches 11-12 introduce more accurate tracking of the other
end's receive window.

Overall this refactors the MPTCP xmit path without introducing new
features; the new code is covered by the existing self-tests.

v2 -> v3:
 - rebased
 - fixed checkpatch issue in patch 1/13
 - fixed some state tracking issues in patch 8/13

v1 -> v2:
 - this is just a repost, to cope with patchwork issues; no changes at all
====================

Link: https://lore.kernel.org/r/cover.1605458224.git.pabeni@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
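To illustrate the scheduling idea from the cover letter, here is a minimal user-space C sketch (not kernel code): data stays in a connection-level (msk) queue and is handed to a subflow only while that subflow's send window has room. The names used below (struct subflow_model, msk_push_pending) are invented for this example; the actual logic lives in the MPTCP code under net/mptcp/.

/* Minimal user-space sketch of the scheduling idea described above: data
 * stays in a connection-level (msk) queue and is handed to a subflow only
 * while that subflow's send window has room.  All names here are
 * hypothetical and exist only for illustration.
 */
#include <stdio.h>
#include <stddef.h>

struct subflow_model {
	const char *name;
	size_t snd_wnd;		/* bytes the peer currently allows on this subflow */
	size_t queued;		/* bytes already sitting in this subflow's xmit queue */
};

/* Push at most what the open window allows; the remainder stays pending
 * at the msk level so it can go to whichever subflow opens up first.
 */
static size_t msk_push_pending(struct subflow_model *sf, size_t pending)
{
	size_t space = sf->snd_wnd > sf->queued ? sf->snd_wnd - sf->queued : 0;
	size_t push = pending < space ? pending : space;

	sf->queued += push;
	return pending - push;
}

int main(void)
{
	struct subflow_model a = { "subflow A", 3000, 2500 };
	struct subflow_model b = { "subflow B", 8000, 1000 };
	size_t pending = 6000;

	pending = msk_push_pending(&a, pending);
	pending = msk_push_pending(&b, pending);
	printf("%s queued=%zu, %s queued=%zu, still pending=%zu\n",
	       a.name, a.queued, b.name, b.queued, pending);
	return 0;
}

Keeping the per-subflow queues short this way is what allows the faster switch-over to another subflow described above when one peer's window fills up.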
Diffstat (limited to 'net/mptcp/subflow.c')
-rw-r--r--  net/mptcp/subflow.c  33
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index ac4a1fe3550b..794259789194 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -997,17 +997,16 @@ static void subflow_data_ready(struct sock *sk)
 static void subflow_write_space(struct sock *sk)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+	struct socket *sock = READ_ONCE(sk->sk_socket);
 	struct sock *parent = subflow->conn;
 
 	if (!sk_stream_is_writeable(sk))
 		return;
 
-	if (sk_stream_is_writeable(parent)) {
-		set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
-		smp_mb__after_atomic();
-		/* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
-		sk_stream_write_space(parent);
-	}
+	if (sock && sk_stream_is_writeable(parent))
+		clear_bit(SOCK_NOSPACE, &sock->flags);
+
+	sk_stream_write_space(parent);
 }
 
 static struct inet_connection_sock_af_ops *
@@ -1125,6 +1124,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
 	if (err && err != -EINPROGRESS)
 		goto failed;
 
+	sock_hold(ssk);
 	spin_lock_bh(&msk->join_list_lock);
 	list_add_tail(&subflow->node, &msk->join_list);
 	spin_unlock_bh(&msk->join_list_lock);
@@ -1132,6 +1132,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
 	return err;
 
 failed:
+	subflow->disposable = 1;
 	sock_release(sf);
 	return err;
 }
@@ -1254,7 +1255,6 @@ static void subflow_state_change(struct sock *sk)
 		mptcp_data_ready(parent, sk);
 
 	if (__mptcp_check_fallback(mptcp_sk(parent)) &&
-	    !(parent->sk_shutdown & RCV_SHUTDOWN) &&
 	    !subflow->rx_eof && subflow_is_done(sk)) {
 		subflow->rx_eof = 1;
 		mptcp_subflow_eof(parent);
@@ -1297,17 +1297,26 @@ out:
 	return err;
 }
 
-static void subflow_ulp_release(struct sock *sk)
+static void subflow_ulp_release(struct sock *ssk)
 {
-	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);
+	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
+	bool release = true;
+	struct sock *sk;
 
 	if (!ctx)
 		return;
 
-	if (ctx->conn)
-		sock_put(ctx->conn);
+	sk = ctx->conn;
+	if (sk) {
+		/* if the msk has been orphaned, keep the ctx
+		 * alive, will be freed by mptcp_done()
+		 */
+		release = ctx->disposable;
+		sock_put(sk);
+	}
 
-	kfree_rcu(ctx, rcu);
+	if (release)
+		kfree_rcu(ctx, rcu);
 }
 
 static void subflow_ulp_clone(const struct request_sock *req,